author    Michaël Zasso <targos@protonmail.com>    2021-10-10 11:10:43 +0200
committer Michaël Zasso <targos@protonmail.com>    2021-10-12 08:07:50 +0200
commit    62719c5fd2ab7dee1ac4019c1715061d556ac457 (patch)
tree      356fed3842e577ab58fd51d5cc02f071cf7ee216 /deps/v8/src
parent    a784258444b052dfd31cca90db57b21dc38bb1eb (diff)
download  node-new-62719c5fd2ab7dee1ac4019c1715061d556ac457.tar.gz
deps: update V8 to 9.5.172.19
PR-URL: https://github.com/nodejs/node/pull/40178
Reviewed-By: Antoine du Hamel <duhamelantoine1995@gmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/DEPS5
-rw-r--r--deps/v8/src/api/api-arguments.h1
-rw-r--r--deps/v8/src/api/api-natives.h2
-rw-r--r--deps/v8/src/api/api.cc276
-rw-r--r--deps/v8/src/api/api.h13
-rw-r--r--deps/v8/src/asmjs/asm-parser.cc8
-rw-r--r--deps/v8/src/ast/prettyprinter.cc29
-rw-r--r--deps/v8/src/ast/prettyprinter.h1
-rw-r--r--deps/v8/src/base/atomicops.h25
-rw-r--r--deps/v8/src/base/bounded-page-allocator.cc58
-rw-r--r--deps/v8/src/base/bounded-page-allocator.h2
-rw-r--r--deps/v8/src/base/build_config.h12
-rw-r--r--deps/v8/src/base/compiler-specific.h12
-rw-r--r--deps/v8/src/base/flags.h66
-rw-r--r--deps/v8/src/base/optional.h2
-rw-r--r--deps/v8/src/base/page-allocator.cc4
-rw-r--r--deps/v8/src/base/page-allocator.h2
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc5
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc56
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc15
-rw-r--r--deps/v8/src/base/platform/platform.h2
-rw-r--r--deps/v8/src/base/region-allocator.cc29
-rw-r--r--deps/v8/src/base/region-allocator.h5
-rw-r--r--deps/v8/src/base/sanitizer/asan.h7
-rw-r--r--deps/v8/src/base/sanitizer/tsan.h20
-rw-r--r--deps/v8/src/base/win32-headers.h6
-rw-r--r--deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h13
-rw-r--r--deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h12
-rw-r--r--deps/v8/src/baseline/baseline-assembler-inl.h20
-rw-r--r--deps/v8/src/baseline/baseline-assembler.h15
-rw-r--r--deps/v8/src/baseline/baseline-batch-compiler.cc7
-rw-r--r--deps/v8/src/baseline/baseline-compiler.cc90
-rw-r--r--deps/v8/src/baseline/baseline-compiler.h1
-rw-r--r--deps/v8/src/baseline/baseline.cc7
-rw-r--r--deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h15
-rw-r--r--deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h503
-rw-r--r--deps/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h77
-rw-r--r--deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h6
-rw-r--r--deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h6
-rw-r--r--deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h270
-rw-r--r--deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h64
-rw-r--r--deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h15
-rw-r--r--deps/v8/src/bigint/bigint-internal.h3
-rw-r--r--deps/v8/src/bigint/bigint.h67
-rw-r--r--deps/v8/src/bigint/fromstring.cc263
-rw-r--r--deps/v8/src/builtins/accessors.cc1
-rw-r--r--deps/v8/src/builtins/accessors.h2
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc316
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc283
-rw-r--r--deps/v8/src/builtins/array-concat.tq2
-rw-r--r--deps/v8/src/builtins/array-shift.tq2
-rw-r--r--deps/v8/src/builtins/array-unshift.tq2
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc61
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc5
-rw-r--r--deps/v8/src/builtins/builtins-async-iterator-gen.cc6
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc7
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc5
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc1
-rw-r--r--deps/v8/src/builtins/builtins-date.cc79
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h223
-rw-r--r--deps/v8/src/builtins/builtins-descriptors.h27
-rw-r--r--deps/v8/src/builtins/builtins-generator-gen.cc18
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc103
-rw-r--r--deps/v8/src/builtins/builtins-intl.cc9
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.cc3
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc9
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc16
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc88
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc45
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc6
-rw-r--r--deps/v8/src/builtins/builtins-string.tq2
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.cc11
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.h1
-rw-r--r--deps/v8/src/builtins/console.tq3
-rw-r--r--deps/v8/src/builtins/convert.tq3
-rw-r--r--deps/v8/src/builtins/frame-arguments.tq8
-rw-r--r--deps/v8/src/builtins/frames.tq6
-rw-r--r--deps/v8/src/builtins/function.tq2
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc361
-rw-r--r--deps/v8/src/builtins/loong64/builtins-loong64.cc3755
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc42
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc43
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc9
-rw-r--r--deps/v8/src/builtins/riscv64/builtins-riscv64.cc128
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc9
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc15
-rw-r--r--deps/v8/src/builtins/typed-array-createtypedarray.tq1
-rw-r--r--deps/v8/src/builtins/typed-array-every.tq25
-rw-r--r--deps/v8/src/builtins/typed-array-filter.tq12
-rw-r--r--deps/v8/src/builtins/typed-array-find.tq35
-rw-r--r--deps/v8/src/builtins/typed-array-findindex.tq28
-rw-r--r--deps/v8/src/builtins/typed-array-findlast.tq56
-rw-r--r--deps/v8/src/builtins/typed-array-findlastindex.tq57
-rw-r--r--deps/v8/src/builtins/typed-array-foreach.tq23
-rw-r--r--deps/v8/src/builtins/typed-array-reduce.tq12
-rw-r--r--deps/v8/src/builtins/typed-array-reduceright.tq12
-rw-r--r--deps/v8/src/builtins/typed-array-set.tq12
-rw-r--r--deps/v8/src/builtins/typed-array-some.tq28
-rw-r--r--deps/v8/src/builtins/typed-array.tq4
-rw-r--r--deps/v8/src/builtins/wasm.tq10
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc347
-rw-r--r--deps/v8/src/codegen/OWNERS3
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.cc5
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.cc59
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.h2
-rw-r--r--deps/v8/src/codegen/arm/register-arm.h1
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.cc89
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.h68
-rw-r--r--deps/v8/src/codegen/arm64/register-arm64.h4
-rw-r--r--deps/v8/src/codegen/assembler-arch.h2
-rw-r--r--deps/v8/src/codegen/assembler-inl.h2
-rw-r--r--deps/v8/src/codegen/assembler.cc6
-rw-r--r--deps/v8/src/codegen/assembler.h10
-rw-r--r--deps/v8/src/codegen/atomic-memory-order.h35
-rw-r--r--deps/v8/src/codegen/code-factory.cc57
-rw-r--r--deps/v8/src/codegen/code-factory.h3
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.cc123
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.h92
-rw-r--r--deps/v8/src/codegen/compiler.cc111
-rw-r--r--deps/v8/src/codegen/compiler.h35
-rw-r--r--deps/v8/src/codegen/constant-pool.cc3
-rw-r--r--deps/v8/src/codegen/constants-arch.h2
-rw-r--r--deps/v8/src/codegen/cpu-features.h3
-rw-r--r--deps/v8/src/codegen/external-reference.cc69
-rw-r--r--deps/v8/src/codegen/external-reference.h24
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.cc10
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.h21
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.cc402
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.h50
-rw-r--r--deps/v8/src/codegen/ia32/register-ia32.h3
-rw-r--r--deps/v8/src/codegen/ia32/sse-instr.h6
-rw-r--r--deps/v8/src/codegen/interface-descriptors-inl.h11
-rw-r--r--deps/v8/src/codegen/interface-descriptors.h16
-rw-r--r--deps/v8/src/codegen/loong64/assembler-loong64-inl.h249
-rw-r--r--deps/v8/src/codegen/loong64/assembler-loong64.cc2405
-rw-r--r--deps/v8/src/codegen/loong64/assembler-loong64.h1129
-rw-r--r--deps/v8/src/codegen/loong64/constants-loong64.cc100
-rw-r--r--deps/v8/src/codegen/loong64/constants-loong64.h1291
-rw-r--r--deps/v8/src/codegen/loong64/cpu-loong64.cc38
-rw-r--r--deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h278
-rw-r--r--deps/v8/src/codegen/loong64/macro-assembler-loong64.cc4107
-rw-r--r--deps/v8/src/codegen/loong64/macro-assembler-loong64.h1062
-rw-r--r--deps/v8/src/codegen/loong64/register-loong64.h288
-rw-r--r--deps/v8/src/codegen/macro-assembler.h3
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.cc1
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.cc17
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.h6
-rw-r--r--deps/v8/src/codegen/mips/register-mips.h1
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.cc17
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.h6
-rw-r--r--deps/v8/src/codegen/mips64/register-mips64.h1
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.cc36
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.h44
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.cc9
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.h48
-rw-r--r--deps/v8/src/codegen/ppc/constants-ppc.h35
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.cc72
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.h18
-rw-r--r--deps/v8/src/codegen/ppc/register-ppc.h1
-rw-r--r--deps/v8/src/codegen/register-arch.h2
-rw-r--r--deps/v8/src/codegen/register-configuration.cc42
-rw-r--r--deps/v8/src/codegen/reloc-info.cc2
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64.cc663
-rw-r--r--deps/v8/src/codegen/riscv64/assembler-riscv64.h286
-rw-r--r--deps/v8/src/codegen/riscv64/constants-riscv64.cc41
-rw-r--r--deps/v8/src/codegen/riscv64/constants-riscv64.h546
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc164
-rw-r--r--deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h50
-rw-r--r--deps/v8/src/codegen/riscv64/register-riscv64.h81
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.cc1
-rw-r--r--deps/v8/src/codegen/s390/constants-s390.h20
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.cc246
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.h184
-rw-r--r--deps/v8/src/codegen/s390/register-s390.h1
-rw-r--r--deps/v8/src/codegen/script-details.h1
-rw-r--r--deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc372
-rw-r--r--deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h442
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64-inl.h6
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.cc53
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.h32
-rw-r--r--deps/v8/src/codegen/x64/fma-instr.h8
-rw-r--r--deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h4
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.cc645
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.h137
-rw-r--r--deps/v8/src/codegen/x64/register-x64.h54
-rw-r--r--deps/v8/src/codegen/x64/sse-instr.h1
-rw-r--r--deps/v8/src/common/globals.h41
-rw-r--r--deps/v8/src/common/message-template.h5
-rw-r--r--deps/v8/src/compiler-dispatcher/OWNERS1
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc19
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h4
-rw-r--r--deps/v8/src/compiler/OWNERS1
-rw-r--r--deps/v8/src/compiler/access-builder.cc122
-rw-r--r--deps/v8/src/compiler/access-builder.h10
-rw-r--r--deps/v8/src/compiler/access-info.cc14
-rw-r--r--deps/v8/src/compiler/backend/arm/code-generator-arm.cc197
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc207
-rw-r--r--deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc641
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h748
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc196
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc802
-rw-r--r--deps/v8/src/compiler/backend/code-generator.cc104
-rw-r--r--deps/v8/src/compiler/backend/code-generator.h37
-rw-r--r--deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc936
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h144
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc168
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc574
-rw-r--r--deps/v8/src/compiler/backend/instruction-codes.h138
-rw-r--r--deps/v8/src/compiler/backend/instruction-scheduler.cc93
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.cc149
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.h40
-rw-r--r--deps/v8/src/compiler/backend/instruction.cc4
-rw-r--r--deps/v8/src/compiler/backend/instruction.h3
-rw-r--r--deps/v8/src/compiler/backend/jump-threading.cc156
-rw-r--r--deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc2636
-rw-r--r--deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h397
-rw-r--r--deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc26
-rw-r--r--deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc3124
-rw-r--r--deps/v8/src/compiler/backend/mips/code-generator-mips.cc191
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc44
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc67
-rw-r--r--deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc392
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h800
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc66
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc288
-rw-r--r--deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc103
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h11
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc11
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc103
-rw-r--r--deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc1008
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h33
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc71
-rw-r--r--deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc390
-rw-r--r--deps/v8/src/compiler/backend/s390/code-generator-s390.cc440
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-codes-s390.h44
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc44
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc227
-rw-r--r--deps/v8/src/compiler/backend/x64/code-generator-x64.cc1062
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-codes-x64.h790
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc42
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc363
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc17
-rw-r--r--deps/v8/src/compiler/branch-elimination.h1
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc17
-rw-r--r--deps/v8/src/compiler/c-linkage.cc12
-rw-r--r--deps/v8/src/compiler/code-assembler.cc82
-rw-r--r--deps/v8/src/compiler/code-assembler.h63
-rw-r--r--deps/v8/src/compiler/common-operator.cc212
-rw-r--r--deps/v8/src/compiler/common-operator.h62
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.cc183
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.h5
-rw-r--r--deps/v8/src/compiler/compilation-dependency.h37
-rw-r--r--deps/v8/src/compiler/decompression-optimizer.cc7
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc67
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h4
-rw-r--r--deps/v8/src/compiler/frame-states.cc7
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc41
-rw-r--r--deps/v8/src/compiler/graph-assembler.h42
-rw-r--r--deps/v8/src/compiler/heap-refs.cc893
-rw-r--r--deps/v8/src/compiler/heap-refs.h125
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc18
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc58
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc22
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc59
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc16
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc35
-rw-r--r--deps/v8/src/compiler/js-heap-broker.h28
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc77
-rw-r--r--deps/v8/src/compiler/js-inlining.cc7
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc68
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc33
-rw-r--r--deps/v8/src/compiler/linkage.cc7
-rw-r--r--deps/v8/src/compiler/linkage.h18
-rw-r--r--deps/v8/src/compiler/loop-analysis.cc19
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc38
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc15
-rw-r--r--deps/v8/src/compiler/machine-operator.cc358
-rw-r--r--deps/v8/src/compiler/machine-operator.h86
-rw-r--r--deps/v8/src/compiler/memory-lowering.cc33
-rw-r--r--deps/v8/src/compiler/memory-lowering.h3
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc11
-rw-r--r--deps/v8/src/compiler/memory-optimizer.h1
-rw-r--r--deps/v8/src/compiler/node-matchers.h1
-rw-r--r--deps/v8/src/compiler/opcodes.h5
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.cc27
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.h9
-rw-r--r--deps/v8/src/compiler/pipeline.cc103
-rw-r--r--deps/v8/src/compiler/pipeline.h8
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc8
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc23
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h74
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc28
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h3
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc267
-rw-r--r--deps/v8/src/compiler/simplified-operator.h19
-rw-r--r--deps/v8/src/compiler/typed-optimization.cc6
-rw-r--r--deps/v8/src/compiler/typer.cc45
-rw-r--r--deps/v8/src/compiler/verifier.cc8
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc274
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h23
-rw-r--r--deps/v8/src/compiler/wasm-inlining.cc195
-rw-r--r--deps/v8/src/compiler/wasm-inlining.h77
-rw-r--r--deps/v8/src/d8/async-hooks-wrapper.cc198
-rw-r--r--deps/v8/src/d8/async-hooks-wrapper.h9
-rw-r--r--deps/v8/src/d8/d8-platforms.cc11
-rw-r--r--deps/v8/src/d8/d8-posix.cc2
-rw-r--r--deps/v8/src/d8/d8-test.cc18
-rw-r--r--deps/v8/src/d8/d8.cc97
-rw-r--r--deps/v8/src/d8/d8.h11
-rw-r--r--deps/v8/src/date/date.cc78
-rw-r--r--deps/v8/src/date/date.h11
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc16
-rw-r--r--deps/v8/src/debug/debug-interface.cc17
-rw-r--r--deps/v8/src/debug/debug-interface.h16
-rw-r--r--deps/v8/src/debug/debug-property-iterator.h8
-rw-r--r--deps/v8/src/debug/debug.cc35
-rw-r--r--deps/v8/src/debug/interface-types.h5
-rw-r--r--deps/v8/src/deoptimizer/deoptimized-frame-info.cc10
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.cc47
-rw-r--r--deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc42
-rw-r--r--deps/v8/src/deoptimizer/translated-state.cc32
-rw-r--r--deps/v8/src/diagnostics/arm/disasm-arm.cc13
-rw-r--r--deps/v8/src/diagnostics/arm/eh-frame-arm.cc2
-rw-r--r--deps/v8/src/diagnostics/arm/unwinder-arm.cc2
-rw-r--r--deps/v8/src/diagnostics/arm64/disasm-arm64.cc6
-rw-r--r--deps/v8/src/diagnostics/arm64/eh-frame-arm64.cc2
-rw-r--r--deps/v8/src/diagnostics/compilation-statistics.cc23
-rw-r--r--deps/v8/src/diagnostics/compilation-statistics.h2
-rw-r--r--deps/v8/src/diagnostics/eh-frame.cc2
-rw-r--r--deps/v8/src/diagnostics/gdb-jit.cc206
-rw-r--r--deps/v8/src/diagnostics/gdb-jit.h12
-rw-r--r--deps/v8/src/diagnostics/ia32/disasm-ia32.cc58
-rw-r--r--deps/v8/src/diagnostics/loong64/disasm-loong64.cc1697
-rw-r--r--deps/v8/src/diagnostics/loong64/unwinder-loong64.cc14
-rw-r--r--deps/v8/src/diagnostics/mips/disasm-mips.cc8
-rw-r--r--deps/v8/src/diagnostics/mips64/disasm-mips64.cc9
-rw-r--r--deps/v8/src/diagnostics/objects-debug.cc15
-rw-r--r--deps/v8/src/diagnostics/objects-printer.cc8
-rw-r--r--deps/v8/src/diagnostics/perf-jit.h3
-rw-r--r--deps/v8/src/diagnostics/ppc/disasm-ppc.cc31
-rw-r--r--deps/v8/src/diagnostics/ppc/eh-frame-ppc.cc2
-rw-r--r--deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc1086
-rw-r--r--deps/v8/src/diagnostics/s390/eh-frame-s390.cc2
-rw-r--r--deps/v8/src/diagnostics/system-jit-win.cc6
-rw-r--r--deps/v8/src/diagnostics/unwinder.cc2
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.cc30
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.h4
-rw-r--r--deps/v8/src/diagnostics/x64/disasm-x64.cc507
-rw-r--r--deps/v8/src/execution/OWNERS1
-rw-r--r--deps/v8/src/execution/arm/simulator-arm.cc69
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.cc3
-rw-r--r--deps/v8/src/execution/execution.cc4
-rw-r--r--deps/v8/src/execution/frame-constants.h6
-rw-r--r--deps/v8/src/execution/frames.cc41
-rw-r--r--deps/v8/src/execution/frames.h12
-rw-r--r--deps/v8/src/execution/futex-emulation.h4
-rw-r--r--deps/v8/src/execution/isolate.cc49
-rw-r--r--deps/v8/src/execution/isolate.h6
-rw-r--r--deps/v8/src/execution/loong64/frame-constants-loong64.cc32
-rw-r--r--deps/v8/src/execution/loong64/frame-constants-loong64.h76
-rw-r--r--deps/v8/src/execution/loong64/simulator-loong64.cc5538
-rw-r--r--deps/v8/src/execution/loong64/simulator-loong64.h647
-rw-r--r--deps/v8/src/execution/messages.cc87
-rw-r--r--deps/v8/src/execution/messages.h1
-rw-r--r--deps/v8/src/execution/microtask-queue.h3
-rw-r--r--deps/v8/src/execution/mips/simulator-mips.cc23
-rw-r--r--deps/v8/src/execution/mips64/simulator-mips64.cc20
-rw-r--r--deps/v8/src/execution/mips64/simulator-mips64.h4
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.cc79
-rw-r--r--deps/v8/src/execution/riscv64/simulator-riscv64.cc1536
-rw-r--r--deps/v8/src/execution/riscv64/simulator-riscv64.h336
-rw-r--r--deps/v8/src/execution/runtime-profiler.cc23
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.cc232
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.h20
-rw-r--r--deps/v8/src/execution/simulator-base.h6
-rw-r--r--deps/v8/src/execution/simulator.h2
-rw-r--r--deps/v8/src/execution/thread-local-top.h9
-rw-r--r--deps/v8/src/execution/v8threads.cc1
-rw-r--r--deps/v8/src/execution/vm-state.h2
-rw-r--r--deps/v8/src/extensions/cputracemark-extension.cc3
-rw-r--r--deps/v8/src/extensions/cputracemark-extension.h6
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.cc1
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.h6
-rw-r--r--deps/v8/src/extensions/gc-extension.cc6
-rw-r--r--deps/v8/src/extensions/gc-extension.h7
-rw-r--r--deps/v8/src/extensions/ignition-statistics-extension.cc9
-rw-r--r--deps/v8/src/extensions/ignition-statistics-extension.h6
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc1
-rw-r--r--deps/v8/src/extensions/statistics-extension.h6
-rw-r--r--deps/v8/src/extensions/trigger-failure-extension.cc1
-rw-r--r--deps/v8/src/extensions/trigger-failure-extension.h6
-rw-r--r--deps/v8/src/extensions/vtunedomain-support-extension.cc4
-rw-r--r--deps/v8/src/extensions/vtunedomain-support-extension.h6
-rw-r--r--deps/v8/src/flags/flag-definitions.h80
-rw-r--r--deps/v8/src/handles/DIR_METADATA4
-rw-r--r--deps/v8/src/handles/global-handles.cc2
-rw-r--r--deps/v8/src/handles/global-handles.h3
-rw-r--r--deps/v8/src/handles/handles.h4
-rw-r--r--deps/v8/src/heap/DIR_METADATA4
-rw-r--r--deps/v8/src/heap/array-buffer-sweeper.cc6
-rw-r--r--deps/v8/src/heap/base/asm/loong64/push_registers_asm.cc48
-rw-r--r--deps/v8/src/heap/base/stack.cc10
-rw-r--r--deps/v8/src/heap/basic-memory-chunk.cc24
-rw-r--r--deps/v8/src/heap/basic-memory-chunk.h104
-rw-r--r--deps/v8/src/heap/cppgc-js/DEPS3
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.cc25
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-heap.h4
-rw-r--r--deps/v8/src/heap/cppgc-js/cpp-snapshot.cc77
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h1
-rw-r--r--deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc1
-rw-r--r--deps/v8/src/heap/cppgc/DEPS3
-rw-r--r--deps/v8/src/heap/cppgc/caged-heap-local-data.cc10
-rw-r--r--deps/v8/src/heap/cppgc/caged-heap.cc31
-rw-r--r--deps/v8/src/heap/cppgc/caged-heap.h20
-rw-r--r--deps/v8/src/heap/cppgc/gc-info.cc77
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.cc33
-rw-r--r--deps/v8/src/heap/cppgc/heap-base.h11
-rw-r--r--deps/v8/src/heap/cppgc/heap-object-header.h7
-rw-r--r--deps/v8/src/heap/cppgc/heap.cc14
-rw-r--r--deps/v8/src/heap/cppgc/marker.cc11
-rw-r--r--deps/v8/src/heap/cppgc/marking-state.h24
-rw-r--r--deps/v8/src/heap/cppgc/marking-verifier.cc12
-rw-r--r--deps/v8/src/heap/cppgc/marking-verifier.h3
-rw-r--r--deps/v8/src/heap/cppgc/memory.cc2
-rw-r--r--deps/v8/src/heap/cppgc/memory.h6
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator.cc62
-rw-r--r--deps/v8/src/heap/cppgc/object-allocator.h17
-rw-r--r--deps/v8/src/heap/cppgc/page-memory.cc118
-rw-r--r--deps/v8/src/heap/cppgc/page-memory.h25
-rw-r--r--deps/v8/src/heap/cppgc/platform.cc44
-rw-r--r--deps/v8/src/heap/cppgc/platform.h43
-rw-r--r--deps/v8/src/heap/cppgc/prefinalizer-handler.cc27
-rw-r--r--deps/v8/src/heap/cppgc/prefinalizer-handler.h9
-rw-r--r--deps/v8/src/heap/cppgc/stats-collector.cc22
-rw-r--r--deps/v8/src/heap/cppgc/stats-collector.h7
-rw-r--r--deps/v8/src/heap/cppgc/visitor.cc7
-rw-r--r--deps/v8/src/heap/cppgc/write-barrier.cc4
-rw-r--r--deps/v8/src/heap/embedder-tracing.h3
-rw-r--r--deps/v8/src/heap/factory-base.cc3
-rw-r--r--deps/v8/src/heap/factory-inl.h9
-rw-r--r--deps/v8/src/heap/factory.cc32
-rw-r--r--deps/v8/src/heap/factory.h8
-rw-r--r--deps/v8/src/heap/heap.cc30
-rw-r--r--deps/v8/src/heap/heap.h6
-rw-r--r--deps/v8/src/heap/large-spaces.cc2
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h4
-rw-r--r--deps/v8/src/heap/mark-compact.cc49
-rw-r--r--deps/v8/src/heap/mark-compact.h4
-rw-r--r--deps/v8/src/heap/marking-visitor-inl.h26
-rw-r--r--deps/v8/src/heap/marking-visitor.h2
-rw-r--r--deps/v8/src/heap/memory-chunk-layout.h3
-rw-r--r--deps/v8/src/heap/memory-chunk.cc2
-rw-r--r--deps/v8/src/heap/memory-chunk.h24
-rw-r--r--deps/v8/src/heap/memory-measurement.cc2
-rw-r--r--deps/v8/src/heap/memory-measurement.h1
-rw-r--r--deps/v8/src/heap/new-spaces.cc15
-rw-r--r--deps/v8/src/heap/new-spaces.h2
-rw-r--r--deps/v8/src/heap/objects-visiting.cc6
-rw-r--r--deps/v8/src/heap/progress-bar.h61
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc2
-rw-r--r--deps/v8/src/heap/spaces.cc5
-rw-r--r--deps/v8/src/heap/spaces.h10
-rw-r--r--deps/v8/src/heap/third-party/heap-api.h4
-rw-r--r--deps/v8/src/ic/OWNERS1
-rw-r--r--deps/v8/src/init/bootstrapper.cc49
-rw-r--r--deps/v8/src/init/bootstrapper.h3
-rw-r--r--deps/v8/src/init/heap-symbols.h11
-rw-r--r--deps/v8/src/init/isolate-allocator.cc24
-rw-r--r--deps/v8/src/init/startup-data-util.cc7
-rw-r--r--deps/v8/src/init/startup-data-util.h2
-rw-r--r--deps/v8/src/init/v8.cc28
-rw-r--r--deps/v8/src/init/v8.h4
-rw-r--r--deps/v8/src/init/vm-cage.cc81
-rw-r--r--deps/v8/src/init/vm-cage.h130
-rw-r--r--deps/v8/src/inspector/DEPS1
-rw-r--r--deps/v8/src/inspector/custom-preview.cc5
-rw-r--r--deps/v8/src/inspector/injected-script.cc11
-rw-r--r--deps/v8/src/inspector/injected-script.h8
-rw-r--r--deps/v8/src/inspector/inspected-context.cc4
-rw-r--r--deps/v8/src/inspector/inspected-context.h8
-rw-r--r--deps/v8/src/inspector/test-interface.h2
-rw-r--r--deps/v8/src/inspector/v8-console-message.cc4
-rw-r--r--deps/v8/src/inspector/v8-console-message.h3
-rw-r--r--deps/v8/src/inspector/v8-console.cc7
-rw-r--r--deps/v8/src/inspector/v8-console.h9
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc3
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.h8
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc11
-rw-r--r--deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc1
-rw-r--r--deps/v8/src/inspector/v8-heap-profiler-agent-impl.h4
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.cc36
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.h6
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.cc120
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.h15
-rw-r--r--deps/v8/src/inspector/v8-regex.cc8
-rw-r--r--deps/v8/src/inspector/v8-regex.h5
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc9
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.h8
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h7
-rw-r--r--deps/v8/src/inspector/v8-value-utils.cc4
-rw-r--r--deps/v8/src/inspector/v8-value-utils.h3
-rw-r--r--deps/v8/src/inspector/value-mirror.cc37
-rw-r--r--deps/v8/src/inspector/value-mirror.h4
-rw-r--r--deps/v8/src/interpreter/OWNERS3
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc25
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h1
-rw-r--r--deps/v8/src/interpreter/bytecodes.h4
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc161
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h53
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc47
-rw-r--r--deps/v8/src/interpreter/interpreter.cc32
-rw-r--r--deps/v8/src/interpreter/interpreter.h2
-rw-r--r--deps/v8/src/json/json-parser.h1
-rw-r--r--deps/v8/src/libplatform/default-platform.cc1
-rw-r--r--deps/v8/src/libsampler/sampler.cc7
-rw-r--r--deps/v8/src/libsampler/sampler.h6
-rw-r--r--deps/v8/src/logging/counters.h2
-rw-r--r--deps/v8/src/logging/log.cc20
-rw-r--r--deps/v8/src/logging/log.h5
-rw-r--r--deps/v8/src/logging/runtime-call-stats.cc11
-rw-r--r--deps/v8/src/logging/runtime-call-stats.h6
-rw-r--r--deps/v8/src/objects/allocation-site-inl.h30
-rw-r--r--deps/v8/src/objects/allocation-site.h2
-rw-r--r--deps/v8/src/objects/arguments.h17
-rw-r--r--deps/v8/src/objects/arguments.tq4
-rw-r--r--deps/v8/src/objects/backing-store.cc52
-rw-r--r--deps/v8/src/objects/backing-store.h2
-rw-r--r--deps/v8/src/objects/bigint.cc13
-rw-r--r--deps/v8/src/objects/cell-inl.h4
-rw-r--r--deps/v8/src/objects/cell.h3
-rw-r--r--deps/v8/src/objects/code-inl.h82
-rw-r--r--deps/v8/src/objects/code.cc6
-rw-r--r--deps/v8/src/objects/code.h18
-rw-r--r--deps/v8/src/objects/contexts.h6
-rw-r--r--deps/v8/src/objects/feedback-cell-inl.h2
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h2
-rw-r--r--deps/v8/src/objects/fixed-array.h11
-rw-r--r--deps/v8/src/objects/instance-type.h6
-rw-r--r--deps/v8/src/objects/intl-objects.cc185
-rw-r--r--deps/v8/src/objects/intl-objects.h17
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h55
-rw-r--r--deps/v8/src/objects/js-array-buffer.cc1
-rw-r--r--deps/v8/src/objects/js-array-buffer.h19
-rw-r--r--deps/v8/src/objects/js-array-inl.h10
-rw-r--r--deps/v8/src/objects/js-array.h49
-rw-r--r--deps/v8/src/objects/js-array.tq27
-rw-r--r--deps/v8/src/objects/js-date-time-format.cc4
-rw-r--r--deps/v8/src/objects/js-function-inl.h14
-rw-r--r--deps/v8/src/objects/js-function.cc85
-rw-r--r--deps/v8/src/objects/js-function.h37
-rw-r--r--deps/v8/src/objects/js-function.tq1
-rw-r--r--deps/v8/src/objects/js-list-format.cc3
-rw-r--r--deps/v8/src/objects/js-locale.cc120
-rw-r--r--deps/v8/src/objects/js-number-format.cc18
-rw-r--r--deps/v8/src/objects/js-objects-inl.h15
-rw-r--r--deps/v8/src/objects/js-objects.h53
-rw-r--r--deps/v8/src/objects/js-objects.tq11
-rw-r--r--deps/v8/src/objects/js-promise.h1
-rw-r--r--deps/v8/src/objects/js-proxy.h8
-rw-r--r--deps/v8/src/objects/js-proxy.tq1
-rw-r--r--deps/v8/src/objects/js-regexp-inl.h16
-rw-r--r--deps/v8/src/objects/js-regexp.cc86
-rw-r--r--deps/v8/src/objects/js-regexp.h91
-rw-r--r--deps/v8/src/objects/js-regexp.tq3
-rw-r--r--deps/v8/src/objects/js-relative-time-format.cc6
-rw-r--r--deps/v8/src/objects/js-weak-refs-inl.h13
-rw-r--r--deps/v8/src/objects/js-weak-refs.h22
-rw-r--r--deps/v8/src/objects/js-weak-refs.tq1
-rw-r--r--deps/v8/src/objects/keys.h13
-rw-r--r--deps/v8/src/objects/map-inl.h27
-rw-r--r--deps/v8/src/objects/map.h7
-rw-r--r--deps/v8/src/objects/module.h1
-rw-r--r--deps/v8/src/objects/object-macros-undef.h2
-rw-r--r--deps/v8/src/objects/object-macros.h14
-rw-r--r--deps/v8/src/objects/objects-body-descriptors-inl.h6
-rw-r--r--deps/v8/src/objects/objects-definitions.h1
-rw-r--r--deps/v8/src/objects/objects.cc10
-rw-r--r--deps/v8/src/objects/objects.h13
-rw-r--r--deps/v8/src/objects/ordered-hash-table.h1
-rw-r--r--deps/v8/src/objects/property-cell-inl.h3
-rw-r--r--deps/v8/src/objects/property-details.h35
-rw-r--r--deps/v8/src/objects/script.h5
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h198
-rw-r--r--deps/v8/src/objects/shared-function-info.cc20
-rw-r--r--deps/v8/src/objects/shared-function-info.h34
-rw-r--r--deps/v8/src/objects/shared-function-info.tq49
-rw-r--r--deps/v8/src/objects/tagged-field.h2
-rw-r--r--deps/v8/src/objects/tagged-impl.h1
-rw-r--r--deps/v8/src/objects/value-serializer.cc4
-rw-r--r--deps/v8/src/objects/value-serializer.h2
-rw-r--r--deps/v8/src/objects/visitors.h2
-rw-r--r--deps/v8/src/parsing/parse-info.h1
-rw-r--r--deps/v8/src/parsing/parser-base.h119
-rw-r--r--deps/v8/src/parsing/parser.h15
-rw-r--r--deps/v8/src/parsing/pending-compilation-error-handler.cc80
-rw-r--r--deps/v8/src/parsing/pending-compilation-error-handler.h74
-rw-r--r--deps/v8/src/parsing/preparse-data.cc10
-rw-r--r--deps/v8/src/parsing/preparser.h24
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc3
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.h2
-rw-r--r--deps/v8/src/parsing/scanner.cc18
-rw-r--r--deps/v8/src/parsing/scanner.h4
-rw-r--r--deps/v8/src/profiler/allocation-tracker.h2
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc11
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h1
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc30
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h2
-rw-r--r--deps/v8/src/profiler/profile-generator.cc49
-rw-r--r--deps/v8/src/profiler/profile-generator.h4
-rw-r--r--deps/v8/src/profiler/strings-storage.cc8
-rw-r--r--deps/v8/src/profiler/strings-storage.h4
-rw-r--r--deps/v8/src/profiler/tick-sample.cc2
-rw-r--r--deps/v8/src/profiler/tick-sample.h2
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc1
-rw-r--r--deps/v8/src/regexp/experimental/experimental-compiler.cc27
-rw-r--r--deps/v8/src/regexp/experimental/experimental-compiler.h5
-rw-r--r--deps/v8/src/regexp/experimental/experimental-interpreter.h5
-rw-r--r--deps/v8/src/regexp/experimental/experimental.cc24
-rw-r--r--deps/v8/src/regexp/experimental/experimental.h5
-rw-r--r--deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc1264
-rw-r--r--deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h214
-rw-r--r--deps/v8/src/regexp/regexp-ast.h16
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator-inl.h1
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator.cc2
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator.h1
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-peephole.cc4
-rw-r--r--deps/v8/src/regexp/regexp-compiler-tonode.cc50
-rw-r--r--deps/v8/src/regexp/regexp-compiler.cc50
-rw-r--r--deps/v8/src/regexp/regexp-compiler.h41
-rw-r--r--deps/v8/src/regexp/regexp-error.h5
-rw-r--r--deps/v8/src/regexp/regexp-flags.h71
-rw-r--r--deps/v8/src/regexp/regexp-interpreter.cc5
-rw-r--r--deps/v8/src/regexp/regexp-interpreter.h2
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-arch.h2
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-tracer.cc4
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc6
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h15
-rw-r--r--deps/v8/src/regexp/regexp-nodes.h22
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc811
-rw-r--r--deps/v8/src/regexp/regexp-parser.h366
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc27
-rw-r--r--deps/v8/src/regexp/regexp-utils.h9
-rw-r--r--deps/v8/src/regexp/regexp.cc133
-rw-r--r--deps/v8/src/regexp/regexp.h40
-rw-r--r--deps/v8/src/roots/DIR_METADATA4
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc4
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc8
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc4
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc5
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc42
-rw-r--r--deps/v8/src/runtime/runtime-module.cc24
-rw-r--r--deps/v8/src/runtime/runtime-object.cc27
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc6
-rw-r--r--deps/v8/src/runtime/runtime-test-wasm.cc1
-rw-r--r--deps/v8/src/runtime/runtime-test.cc95
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc2
-rw-r--r--deps/v8/src/runtime/runtime.cc7
-rw-r--r--deps/v8/src/runtime/runtime.h5
-rw-r--r--deps/v8/src/snapshot/context-deserializer.cc1
-rw-r--r--deps/v8/src/snapshot/context-serializer.cc4
-rw-r--r--deps/v8/src/snapshot/deserializer.cc4
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.cc4
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-empty.cc12
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc6
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc11
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc8
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc7
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc6
-rw-r--r--deps/v8/src/snapshot/serializer.cc22
-rw-r--r--deps/v8/src/snapshot/snapshot.h2
-rw-r--r--deps/v8/src/tasks/OWNERS1
-rw-r--r--deps/v8/src/third_party/vtune/BUILD.gn5
-rw-r--r--deps/v8/src/third_party/vtune/v8-vtune.h2
-rw-r--r--deps/v8/src/third_party/vtune/vtune-jit.cc8
-rw-r--r--deps/v8/src/third_party/vtune/vtune-jit.h5
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc45
-rw-r--r--deps/v8/src/utils/address-map.h1
-rw-r--r--deps/v8/src/utils/allocation.cc56
-rw-r--r--deps/v8/src/utils/allocation.h21
-rw-r--r--deps/v8/src/utils/v8dll-main.cc2
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h28
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h45
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h188
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler-defs.h16
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h8
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc73
-rw-r--r--deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h2817
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h12
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h25
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h377
-rw-r--r--deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h384
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h341
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h246
-rw-r--r--deps/v8/src/wasm/c-api.cc6
-rw-r--r--deps/v8/src/wasm/c-api.h3
-rw-r--r--deps/v8/src/wasm/code-space-access.cc12
-rw-r--r--deps/v8/src/wasm/code-space-access.h3
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h25
-rw-r--r--deps/v8/src/wasm/function-compiler.cc46
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc89
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.h8
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.cc30
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.h5
-rw-r--r--deps/v8/src/wasm/module-compiler.cc37
-rw-r--r--deps/v8/src/wasm/module-compiler.h2
-rw-r--r--deps/v8/src/wasm/module-decoder.cc15
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc12
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc53
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h78
-rw-r--r--deps/v8/src/wasm/wasm-engine.h1
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc61
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.h4
-rw-r--r--deps/v8/src/wasm/wasm-feature-flags.h28
-rw-r--r--deps/v8/src/wasm/wasm-js.cc20
-rw-r--r--deps/v8/src/wasm/wasm-limits.h3
-rw-r--r--deps/v8/src/wasm/wasm-linkage.h9
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc175
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h99
-rw-r--r--deps/v8/src/wasm/wasm-module-sourcemap.cc9
-rw-r--r--deps/v8/src/wasm/wasm-module-sourcemap.h5
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h37
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc27
-rw-r--r--deps/v8/src/wasm/wasm-objects.h16
-rw-r--r--deps/v8/src/web-snapshot/web-snapshot.cc15
725 files changed, 57532 insertions, 17188 deletions
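Assuming a local clone of nodejs/node with this commit and its parent fetched, the same diff (limited to deps/v8/src) can be regenerated with git:

    git diff a784258444b052dfd31cca90db57b21dc38bb1eb 62719c5fd2ab7dee1ac4019c1715061d556ac457 -- deps/v8/src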
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index b3fcddf2f4..341435e28d 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -52,6 +52,7 @@ include_rules = [
"+src/interpreter/setup-interpreter.h",
"-src/regexp",
"+src/regexp/regexp.h",
+ "+src/regexp/regexp-flags.h",
"+src/regexp/regexp-stack.h",
"+src/regexp/regexp-utils.h",
"-src/trap-handler",
@@ -65,6 +66,10 @@ include_rules = [
"+builtins-generated",
"+torque-generated",
"+starboard",
+ # Using cppgc inside v8 is not (yet) allowed.
+ "-include/cppgc",
+ "+include/cppgc/platform.h",
+ "+include/cppgc/source-location.h",
]
specific_include_rules = {
diff --git a/deps/v8/src/api/api-arguments.h b/deps/v8/src/api/api-arguments.h
index 464ebadf37..98354757be 100644
--- a/deps/v8/src/api/api-arguments.h
+++ b/deps/v8/src/api/api-arguments.h
@@ -5,6 +5,7 @@
#ifndef V8_API_API_ARGUMENTS_H_
#define V8_API_API_ARGUMENTS_H_
+#include "include/v8-template.h"
#include "src/api/api.h"
#include "src/debug/debug.h"
#include "src/execution/isolate.h"
diff --git a/deps/v8/src/api/api-natives.h b/deps/v8/src/api/api-natives.h
index fb59eb6cfc..38a8a7b917 100644
--- a/deps/v8/src/api/api-natives.h
+++ b/deps/v8/src/api/api-natives.h
@@ -5,7 +5,7 @@
#ifndef V8_API_API_NATIVES_H_
#define V8_API_API_NATIVES_H_
-#include "include/v8.h"
+#include "include/v8-template.h"
#include "src/base/macros.h"
#include "src/handles/handles.h"
#include "src/handles/maybe-handles.h"
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index a8af304a53..dedbd5db66 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -11,12 +11,19 @@
#include <utility> // For move
#include <vector>
-#include "include/cppgc/custom-space.h"
+#include "include/v8-callbacks.h"
#include "include/v8-cppgc.h"
+#include "include/v8-date.h"
+#include "include/v8-extension.h"
#include "include/v8-fast-api-calls.h"
+#include "include/v8-function.h"
+#include "include/v8-json.h"
+#include "include/v8-locker.h"
+#include "include/v8-primitive-object.h"
#include "include/v8-profiler.h"
#include "include/v8-unwinder-state.h"
#include "include/v8-util.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/api/api-natives.h"
#include "src/base/functional.h"
@@ -56,6 +63,7 @@
#include "src/init/icu_util.h"
#include "src/init/startup-data-util.h"
#include "src/init/v8.h"
+#include "src/init/vm-cage.h"
#include "src/json/json-parser.h"
#include "src/json/json-stringifier.h"
#include "src/logging/counters-scopes.h"
@@ -177,6 +185,49 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
return origin;
}
+ScriptOrigin::ScriptOrigin(
+ Local<Value> resource_name, Local<Integer> line_offset,
+ Local<Integer> column_offset, Local<Boolean> is_shared_cross_origin,
+ Local<Integer> script_id, Local<Value> source_map_url,
+ Local<Boolean> is_opaque, Local<Boolean> is_wasm, Local<Boolean> is_module,
+ Local<PrimitiveArray> host_defined_options)
+ : ScriptOrigin(
+ Isolate::GetCurrent(), resource_name,
+ line_offset.IsEmpty() ? 0 : static_cast<int>(line_offset->Value()),
+ column_offset.IsEmpty() ? 0
+ : static_cast<int>(column_offset->Value()),
+ !is_shared_cross_origin.IsEmpty() && is_shared_cross_origin->IsTrue(),
+ static_cast<int>(script_id.IsEmpty() ? -1 : script_id->Value()),
+ source_map_url, !is_opaque.IsEmpty() && is_opaque->IsTrue(),
+ !is_wasm.IsEmpty() && is_wasm->IsTrue(),
+ !is_module.IsEmpty() && is_module->IsTrue(), host_defined_options) {}
+
+ScriptOrigin::ScriptOrigin(Local<Value> resource_name, int line_offset,
+ int column_offset, bool is_shared_cross_origin,
+ int script_id, Local<Value> source_map_url,
+ bool is_opaque, bool is_wasm, bool is_module,
+ Local<PrimitiveArray> host_defined_options)
+ : isolate_(Isolate::GetCurrent()),
+ resource_name_(resource_name),
+ resource_line_offset_(line_offset),
+ resource_column_offset_(column_offset),
+ options_(is_shared_cross_origin, is_opaque, is_wasm, is_module),
+ script_id_(script_id),
+ source_map_url_(source_map_url),
+ host_defined_options_(host_defined_options) {}
+
+Local<Integer> ScriptOrigin::ResourceLineOffset() const {
+ return v8::Integer::New(isolate_, resource_line_offset_);
+}
+
+Local<Integer> ScriptOrigin::ResourceColumnOffset() const {
+ return v8::Integer::New(isolate_, resource_column_offset_);
+}
+
+Local<Integer> ScriptOrigin::ScriptID() const {
+ return v8::Integer::New(isolate_, script_id_);
+}
+
// --- E x c e p t i o n B e h a v i o r ---
void i::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location) {
@@ -331,6 +382,37 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
namespace {
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+// ArrayBufferAllocator to use when the virtual memory cage is enabled, in which
+// case all ArrayBuffer backing stores need to be allocated inside the data
+// cage. Note, the current implementation is extremely inefficient as it uses
+// the BoundedPageAllocator. In the future, we'll need a proper allocator
+// implementation.
+class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+ ArrayBufferAllocator() { CHECK(page_allocator_); }
+
+ void* Allocate(size_t length) override {
+ return page_allocator_->AllocatePages(nullptr, RoundUp(length, page_size_),
+ page_size_,
+ PageAllocator::kReadWrite);
+ }
+
+ void* AllocateUninitialized(size_t length) override {
+ return Allocate(length);
+ }
+
+ void Free(void* data, size_t length) override {
+ page_allocator_->FreePages(data, RoundUp(length, page_size_));
+ }
+
+ private:
+ PageAllocator* page_allocator_ = internal::GetPlatformDataCagePageAllocator();
+ const size_t page_size_ = page_allocator_->AllocatePageSize();
+};
+
+#else
+
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
void* Allocate(size_t length) override {
@@ -372,6 +454,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return new_data;
}
};
+#endif // V8_VIRTUAL_MEMORY_CAGE
struct SnapshotCreatorData {
explicit SnapshotCreatorData(Isolate* isolate)
@@ -746,9 +829,17 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
}
}
-i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
- LOG_API(isolate, Persistent, New);
- i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
+namespace api_internal {
+i::Address* GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
+ internal::Address* slot,
+ bool has_destructor) {
+ LOG_API(isolate, TracedGlobal, New);
+#ifdef DEBUG
+ Utils::ApiCheck((slot != nullptr), "v8::GlobalizeTracedReference",
+ "the address slot must be not null");
+#endif
+ i::Handle<i::Object> result =
+ isolate->global_handles()->CreateTraced(*obj, slot, has_destructor);
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
i::Object(*obj).ObjectVerify(isolate);
@@ -757,16 +848,9 @@ i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
return result.location();
}
-i::Address* V8::GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
- internal::Address* slot,
- bool has_destructor) {
- LOG_API(isolate, TracedGlobal, New);
-#ifdef DEBUG
- Utils::ApiCheck((slot != nullptr), "v8::GlobalizeTracedReference",
- "the address slot must be not null");
-#endif
- i::Handle<i::Object> result =
- isolate->global_handles()->CreateTraced(*obj, slot, has_destructor);
+i::Address* GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
+ LOG_API(isolate, Persistent, New);
+ i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
i::Object(*obj).ObjectVerify(isolate);
@@ -775,59 +859,38 @@ i::Address* V8::GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
return result.location();
}
-i::Address* V8::CopyGlobalReference(i::Address* from) {
+i::Address* CopyGlobalReference(i::Address* from) {
i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(from);
return result.location();
}
-void V8::MoveGlobalReference(internal::Address** from, internal::Address** to) {
+void MoveGlobalReference(internal::Address** from, internal::Address** to) {
i::GlobalHandles::MoveGlobal(from, to);
}
-void V8::MoveTracedGlobalReference(internal::Address** from,
- internal::Address** to) {
- i::GlobalHandles::MoveTracedGlobal(from, to);
-}
-
-void V8::CopyTracedGlobalReference(const internal::Address* const* from,
- internal::Address** to) {
- i::GlobalHandles::CopyTracedGlobal(from, to);
-}
-
-void V8::MakeWeak(i::Address* location, void* parameter,
- WeakCallbackInfo<void>::Callback weak_callback,
- WeakCallbackType type) {
+void MakeWeak(i::Address* location, void* parameter,
+ WeakCallbackInfo<void>::Callback weak_callback,
+ WeakCallbackType type) {
i::GlobalHandles::MakeWeak(location, parameter, weak_callback, type);
}
-void V8::MakeWeak(i::Address** location_addr) {
+void MakeWeak(i::Address** location_addr) {
i::GlobalHandles::MakeWeak(location_addr);
}
-void* V8::ClearWeak(i::Address* location) {
+void* ClearWeak(i::Address* location) {
return i::GlobalHandles::ClearWeakness(location);
}
-void V8::AnnotateStrongRetainer(i::Address* location, const char* label) {
+void AnnotateStrongRetainer(i::Address* location, const char* label) {
i::GlobalHandles::AnnotateStrongRetainer(location, label);
}
-void V8::DisposeGlobal(i::Address* location) {
+void DisposeGlobal(i::Address* location) {
i::GlobalHandles::Destroy(location);
}
-void V8::DisposeTracedGlobal(internal::Address* location) {
- i::GlobalHandles::DestroyTraced(location);
-}
-
-void V8::SetFinalizationCallbackTraced(
- internal::Address* location, void* parameter,
- WeakCallbackInfo<void>::Callback callback) {
- i::GlobalHandles::SetFinalizationCallbackForTraced(location, parameter,
- callback);
-}
-
-Value* V8::Eternalize(Isolate* v8_isolate, Value* value) {
+Value* Eternalize(Isolate* v8_isolate, Value* value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i::Object object = *Utils::OpenHandle(value);
int index = -1;
@@ -836,20 +899,42 @@ Value* V8::Eternalize(Isolate* v8_isolate, Value* value) {
isolate->eternal_handles()->Get(index).location());
}
-void V8::FromJustIsNothing() {
+void MoveTracedGlobalReference(internal::Address** from,
+ internal::Address** to) {
+ i::GlobalHandles::MoveTracedGlobal(from, to);
+}
+
+void CopyTracedGlobalReference(const internal::Address* const* from,
+ internal::Address** to) {
+ i::GlobalHandles::CopyTracedGlobal(from, to);
+}
+
+void DisposeTracedGlobal(internal::Address* location) {
+ i::GlobalHandles::DestroyTraced(location);
+}
+
+void SetFinalizationCallbackTraced(internal::Address* location, void* parameter,
+ WeakCallbackInfo<void>::Callback callback) {
+ i::GlobalHandles::SetFinalizationCallbackForTraced(location, parameter,
+ callback);
+}
+
+void FromJustIsNothing() {
Utils::ApiCheck(false, "v8::FromJust", "Maybe value is Nothing.");
}
-void V8::ToLocalEmpty() {
+void ToLocalEmpty() {
Utils::ApiCheck(false, "v8::ToLocalChecked", "Empty MaybeLocal.");
}
-void V8::InternalFieldOutOfBounds(int index) {
+void InternalFieldOutOfBounds(int index) {
Utils::ApiCheck(0 <= index && index < kInternalFieldsInWeakCallback,
"WeakCallbackInfo::GetInternalField",
"Internal field out of bounds.");
}
+} // namespace api_internal
+
// --- H a n d l e s ---
HandleScope::HandleScope(Isolate* isolate) { Initialize(isolate); }
@@ -2387,42 +2472,44 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
- std::unique_ptr<i::AlignedCachedData> cached_data;
- if (options == kConsumeCodeCache) {
- if (source->consume_cache_task) {
- // If there's a cache consume task, finish it
- i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
- source->consume_cache_task->impl_->Finish(isolate, str,
- source->resource_options);
- i::Handle<i::SharedFunctionInfo> result;
- if (maybe_function_info.ToHandle(&result)) {
- RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
- }
- // If the above failed, then we must have rejected the cache. Continue
- // with normal compilation, disabling the code cache consumption.
- source->cached_data->rejected = true;
- options = kNoCompileOptions;
- } else {
- DCHECK(source->cached_data);
- // AlignedCachedData takes care of pointer-aligning the data.
- cached_data.reset(new i::AlignedCachedData(source->cached_data->data,
- source->cached_data->length));
- }
- }
-
i::Handle<i::SharedFunctionInfo> result;
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileScript");
i::ScriptDetails script_details = GetScriptDetails(
isolate, source->resource_name, source->resource_line_offset,
source->resource_column_offset, source->source_map_url,
source->host_defined_options, source->resource_options);
- i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
- i::Compiler::GetSharedFunctionInfoForScript(
- isolate, str, script_details, nullptr, cached_data.get(), options,
- no_cache_reason, i::NOT_NATIVES_CODE);
+
+ i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info;
if (options == kConsumeCodeCache) {
- source->cached_data->rejected = cached_data->rejected();
+ if (source->consume_cache_task) {
+ // Take ownership of the internal deserialization task and clear it off
+ // the consume task on the source.
+ DCHECK_NOT_NULL(source->consume_cache_task->impl_);
+ std::unique_ptr<i::BackgroundDeserializeTask> deserialize_task =
+ std::move(source->consume_cache_task->impl_);
+ maybe_function_info =
+ i::Compiler::GetSharedFunctionInfoForScriptWithDeserializeTask(
+ isolate, str, script_details, deserialize_task.get(), options,
+ no_cache_reason, i::NOT_NATIVES_CODE);
+ source->cached_data->rejected = deserialize_task->rejected();
+ } else {
+ DCHECK(source->cached_data);
+ // AlignedCachedData takes care of pointer-aligning the data.
+ auto cached_data = std::make_unique<i::AlignedCachedData>(
+ source->cached_data->data, source->cached_data->length);
+ maybe_function_info =
+ i::Compiler::GetSharedFunctionInfoForScriptWithCachedData(
+ isolate, str, script_details, cached_data.get(), options,
+ no_cache_reason, i::NOT_NATIVES_CODE);
+ source->cached_data->rejected = cached_data->rejected();
+ }
+ } else {
+ // Compile without any cache.
+ maybe_function_info = i::Compiler::GetSharedFunctionInfoForScript(
+ isolate, str, script_details, options, no_cache_reason,
+ i::NOT_NATIVES_CODE);
}
+
has_pending_exception = !maybe_function_info.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(UnboundScript);
RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
@@ -2726,7 +2813,7 @@ v8::TryCatch::TryCatch(v8::Isolate* isolate)
has_terminated_(false) {
ResetInternal();
// Special handling for simulators which have a separate JS stack.
- js_stack_comparable_address_ = reinterpret_cast<void*>(
+ js_stack_comparable_address_ = static_cast<internal::Address>(
i::SimulatorStack::RegisterJSStackComparableAddress(isolate_));
isolate_->RegisterTryCatchHandler(this);
}
@@ -5852,6 +5939,12 @@ void v8::V8::InitializePlatform(Platform* platform) {
i::V8::InitializePlatform(platform);
}
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+bool v8::V8::InitializeVirtualMemoryCage() {
+ return i::V8::InitializeVirtualMemoryCage();
+}
+#endif
+
void v8::V8::ShutdownPlatform() { i::V8::ShutdownPlatform(); }
bool v8::V8::Initialize(const int build_config) {
@@ -5882,6 +5975,16 @@ bool v8::V8::Initialize(const int build_config) {
V8_HEAP_SANDBOX_BOOL ? "ENABLED" : "DISABLED");
}
+ const bool kEmbedderVirtualMemoryCage =
+ (build_config & kVirtualMemoryCage) != 0;
+ if (kEmbedderVirtualMemoryCage != V8_VIRTUAL_MEMORY_CAGE_BOOL) {
+ FATAL(
+ "Embedder-vs-V8 build configuration mismatch. On embedder side "
+ "virtual memory cage is %s while on V8 side it's %s.",
+ kEmbedderVirtualMemoryCage ? "ENABLED" : "DISABLED",
+ V8_VIRTUAL_MEMORY_CAGE_BOOL ? "ENABLED" : "DISABLED");
+ }
+
i::V8::Initialize();
return true;
}
@@ -5998,6 +6101,13 @@ void v8::V8::InitializeExternalStartupDataFromFile(const char* snapshot_blob) {
const char* v8::V8::GetVersion() { return i::Version::GetVersion(); }
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+PageAllocator* v8::V8::GetVirtualMemoryCageDataPageAllocator() {
+ CHECK(i::GetProcessWideVirtualMemoryCage()->is_initialized());
+ return i::GetProcessWideVirtualMemoryCage()->GetDataCagePageAllocator();
+}
+#endif
+
void V8::GetSharedMemoryStatistics(SharedMemoryStatistics* statistics) {
i::ReadOnlyHeap::PopulateReadOnlySpaceStatistics(statistics);
}
@@ -10336,6 +10446,18 @@ bool ConvertDouble(double d) {
} // namespace internal
+bool CopyAndConvertArrayToCppBufferInt32(Local<Array> src, int32_t* dst,
+ uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<&v8::kTypeInfoInt32, int32_t>(
+ src, dst, max_length);
+}
+
+bool CopyAndConvertArrayToCppBufferFloat64(Local<Array> src, double* dst,
+ uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<&v8::kTypeInfoFloat64, double>(
+ src, dst, max_length);
+}
+
} // namespace v8
#undef TRACE_BS
diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h
index 7d2a0c3e9c..e24c951306 100644
--- a/deps/v8/src/api/api.h
+++ b/deps/v8/src/api/api.h
@@ -7,6 +7,11 @@
#include <memory>
+#include "include/v8-container.h"
+#include "include/v8-external.h"
+#include "include/v8-proxy.h"
+#include "include/v8-typed-array.h"
+#include "include/v8-wasm.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/objects/bigint.h"
@@ -18,12 +23,16 @@
#include "src/objects/objects.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/source-text-module.h"
-#include "src/utils/detachable-vector.h"
-
#include "src/objects/templates.h"
+#include "src/utils/detachable-vector.h"
namespace v8 {
+class AccessorSignature;
+class Extension;
+class Signature;
+class Template;
+
namespace internal {
class JSArrayBufferView;
class JSFinalizationRegistry;
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index 8babca7a3b..09c520bbc0 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -698,7 +698,8 @@ void AsmJsParser::ValidateFunctionTable() {
FAIL("Function table definition doesn't match use");
}
module_builder_->SetIndirectFunction(
- static_cast<uint32_t>(table_info->index + count), info->index);
+ 0, static_cast<uint32_t>(table_info->index + count), info->index,
+ WasmModuleBuilder::WasmElemSegment::kRelativeToDeclaredFunctions);
}
++count;
if (Check(',')) {
@@ -2134,7 +2135,10 @@ AsmType* AsmJsParser::ValidateCall() {
EXPECT_TOKENn(']');
VarInfo* function_info = GetVarInfo(function_name);
if (function_info->kind == VarKind::kUnused) {
- uint32_t index = module_builder_->AllocateIndirectFunctions(mask + 1);
+ if (module_builder_->NumTables() == 0) {
+ module_builder_->AddTable(kWasmFuncRef, 0);
+ }
+ uint32_t index = module_builder_->IncreaseTableMinSize(0, mask + 1);
if (index == std::numeric_limits<uint32_t>::max()) {
FAILn("Exceeded maximum function table size");
}
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index fb3690164d..6a68a80cdc 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -13,6 +13,7 @@
#include "src/base/vector.h"
#include "src/common/globals.h"
#include "src/objects/objects-inl.h"
+#include "src/regexp/regexp-flags.h"
#include "src/strings/string-builder-inl.h"
namespace v8 {
@@ -72,6 +73,12 @@ void CallPrinter::Find(AstNode* node, bool print) {
}
}
+void CallPrinter::Print(char c) {
+ if (!found_ || done_) return;
+ num_prints_++;
+ builder_->AppendCharacter(c);
+}
+
void CallPrinter::Print(const char* str) {
if (!found_ || done_) return;
num_prints_++;
@@ -269,13 +276,10 @@ void CallPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
Print("/");
PrintLiteral(node->pattern(), false);
Print("/");
- if (node->flags() & RegExp::kHasIndices) Print("d");
- if (node->flags() & RegExp::kGlobal) Print("g");
- if (node->flags() & RegExp::kIgnoreCase) Print("i");
- if (node->flags() & RegExp::kLinear) Print("l");
- if (node->flags() & RegExp::kMultiline) Print("m");
- if (node->flags() & RegExp::kUnicode) Print("u");
- if (node->flags() & RegExp::kSticky) Print("y");
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ if (node->flags() & RegExp::k##Camel) Print(Char);
+ REGEXP_FLAG_LIST(V)
+#undef V
}
@@ -1189,13 +1193,10 @@ void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
PrintLiteralIndented("PATTERN", node->raw_pattern(), false);
int i = 0;
base::EmbeddedVector<char, 128> buf;
- if (node->flags() & RegExp::kHasIndices) buf[i++] = 'd';
- if (node->flags() & RegExp::kGlobal) buf[i++] = 'g';
- if (node->flags() & RegExp::kIgnoreCase) buf[i++] = 'i';
- if (node->flags() & RegExp::kLinear) buf[i++] = 'l';
- if (node->flags() & RegExp::kMultiline) buf[i++] = 'm';
- if (node->flags() & RegExp::kUnicode) buf[i++] = 'u';
- if (node->flags() & RegExp::kSticky) buf[i++] = 'y';
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ if (node->flags() & RegExp::k##Camel) buf[i++] = Char;
+ REGEXP_FLAG_LIST(V)
+#undef V
buf[i] = '\0';
PrintIndented("FLAGS ");
Print("%s", buf.begin());
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index e26d98e7a3..4ffc36a3a2 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -52,6 +52,7 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
#undef DECLARE_VISIT
private:
+ void Print(char c);
void Print(const char* str);
void Print(Handle<String> str);
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index 888157dc61..20efe3479c 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -191,11 +191,31 @@ inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
std::memory_order_release);
}
+inline void Release_Store(volatile Atomic16* ptr, Atomic16 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_release);
+}
+
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_release);
}
+inline void SeqCst_Store(volatile Atomic8* ptr, Atomic8 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_seq_cst);
+}
+
+inline void SeqCst_Store(volatile Atomic16* ptr, Atomic16 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_seq_cst);
+}
+
+inline void SeqCst_Store(volatile Atomic32* ptr, Atomic32 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_seq_cst);
+}
+
inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
@@ -279,6 +299,11 @@ inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
std::memory_order_release);
}
+inline void SeqCst_Store(volatile Atomic64* ptr, Atomic64 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_seq_cst);
+}
+
inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc
index fa7b10324d..0143b179ff 100644
--- a/deps/v8/src/base/bounded-page-allocator.cc
+++ b/deps/v8/src/base/bounded-page-allocator.cc
@@ -30,19 +30,30 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
PageAllocator::Permission access) {
MutexGuard guard(&mutex_);
DCHECK(IsAligned(alignment, region_allocator_.page_size()));
-
- // Region allocator does not support alignments bigger than it's own
- // allocation alignment.
- DCHECK_LE(alignment, allocate_page_size_);
-
- // TODO(ishell): Consider using randomized version here.
- Address address = region_allocator_.AllocateRegion(size);
+ DCHECK(IsAligned(alignment, allocate_page_size_));
+
+ Address address;
+ if (alignment <= allocate_page_size_) {
+ // TODO(ishell): Consider using randomized version here.
+ address = region_allocator_.AllocateRegion(size);
+ } else {
+ // Currently, this should only be necessary when V8_VIRTUAL_MEMORY_CAGE is
+ // enabled, in which case a bounded page allocator is used to allocate WASM
+ // memory buffers, which have a larger alignment.
+ address = region_allocator_.AllocateAlignedRegion(size, alignment);
+ }
if (address == RegionAllocator::kAllocationFailure) {
return nullptr;
}
- CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size,
- access));
- return reinterpret_cast<void*>(address);
+
+ void* ptr = reinterpret_cast<void*>(address);
+ if (!page_allocator_->SetPermissions(ptr, size, access)) {
+ // This most likely means that we ran out of memory.
+ CHECK_EQ(region_allocator_.FreeRegion(address), size);
+ return nullptr;
+ }
+
+ return ptr;
}
bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
@@ -59,8 +70,13 @@ bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
}
}
- CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size,
- access));
+ void* ptr = reinterpret_cast<void*>(address);
+ if (!page_allocator_->SetPermissions(ptr, size, access)) {
+ // This most likely means that we ran out of memory.
+ CHECK_EQ(region_allocator_.FreeRegion(address), size);
+ return false;
+ }
+
return true;
}
@@ -94,8 +110,16 @@ bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
Address address = reinterpret_cast<Address>(raw_address);
size_t freed_size = region_allocator_.FreeRegion(address);
if (freed_size != size) return false;
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+  // When the virtual memory cage is enabled, the pages returned by the
+  // BoundedPageAllocator must be zero-initialized, as some of its clients
+  // expect them to be. Decommitting them during FreePages ensures that,
+  // while also changing the access permissions to kNoAccess.
+ CHECK(page_allocator_->DecommitPages(raw_address, size));
+#else
CHECK(page_allocator_->SetPermissions(raw_address, size,
PageAllocator::kNoAccess));
+#endif
return true;
}
@@ -128,8 +152,14 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
// Keep the region in "used" state just uncommit some pages.
Address free_address = address + new_size;
size_t free_size = size - new_size;
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ // See comment in FreePages().
+ return page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
+ free_size);
+#else
return page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
free_size, PageAllocator::kNoAccess);
+#endif
}
bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
@@ -144,5 +174,9 @@ bool BoundedPageAllocator::DiscardSystemPages(void* address, size_t size) {
return page_allocator_->DiscardSystemPages(address, size);
}
+bool BoundedPageAllocator::DecommitPages(void* address, size_t size) {
+ return page_allocator_->DecommitPages(address, size);
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/bounded-page-allocator.h b/deps/v8/src/base/bounded-page-allocator.h
index 1c8c846711..db364255f1 100644
--- a/deps/v8/src/base/bounded-page-allocator.h
+++ b/deps/v8/src/base/bounded-page-allocator.h
@@ -71,6 +71,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
bool DiscardSystemPages(void* address, size_t size) override;
+ bool DecommitPages(void* address, size_t size) override;
+
private:
v8::base::Mutex mutex_;
const size_t allocate_page_size_;
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index d7a0c9f3cf..3303916776 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -33,6 +33,9 @@
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
+#elif defined(__loongarch64)
+#define V8_HOST_ARCH_LOONG64 1
+#define V8_HOST_ARCH_64_BIT 1
#elif defined(__PPC64__) || defined(_ARCH_PPC64)
#define V8_HOST_ARCH_PPC64 1
#define V8_HOST_ARCH_64_BIT 1
@@ -83,7 +86,7 @@
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
!V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \
!V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \
- !V8_TARGET_ARCH_RISCV64
+ !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@@ -128,6 +131,8 @@
#define V8_TARGET_ARCH_32_BIT 1
#elif V8_TARGET_ARCH_MIPS64
#define V8_TARGET_ARCH_64_BIT 1
+#elif V8_TARGET_ARCH_LOONG64
+#define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_PPC
#define V8_TARGET_ARCH_32_BIT 1
#elif V8_TARGET_ARCH_PPC64
@@ -171,6 +176,9 @@
#if (V8_TARGET_ARCH_RISCV64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_RISCV64))
#error Target architecture riscv64 is only supported on riscv64 and x64 host
#endif
+#if (V8_TARGET_ARCH_LOONG64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_LOONG64))
+#error Target architecture loong64 is only supported on loong64 and x64 host
+#endif
// Determine architecture endianness.
#if V8_TARGET_ARCH_IA32
@@ -181,6 +189,8 @@
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_ARM64
#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_LOONG64
+#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_MIPS
#if defined(__MIPSEB__)
#define V8_TARGET_BIG_ENDIAN 1
diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h
index f7e2e0e14d..0c37e56afa 100644
--- a/deps/v8/src/base/compiler-specific.h
+++ b/deps/v8/src/base/compiler-specific.h
@@ -7,13 +7,15 @@
#include "include/v8config.h"
-// Annotate a using ALLOW_UNUSED_TYPE = or function indicating it's ok if it's
-// not used. Use like:
-// using Bar = Foo;
+// Annotation to silence compiler warnings about unused
+// types/functions/variables. Use like:
+//
+// using V8_ALLOW_UNUSED Bar = Foo;
+// V8_ALLOW_UNUSED void foo() {}
#if V8_HAS_ATTRIBUTE_UNUSED
-#define ALLOW_UNUSED_TYPE __attribute__((unused))
+#define V8_ALLOW_UNUSED __attribute__((unused))
#else
-#define ALLOW_UNUSED_TYPE
+#define V8_ALLOW_UNUSED
#endif
// Tell the compiler a function is using a printf-style format string.
diff --git a/deps/v8/src/base/flags.h b/deps/v8/src/base/flags.h
index 96d99059ca..2a36ca77e8 100644
--- a/deps/v8/src/base/flags.h
+++ b/deps/v8/src/base/flags.h
@@ -89,39 +89,39 @@ class Flags final {
mask_type mask_;
};
-#define DEFINE_OPERATORS_FOR_FLAGS(Type) \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator&( \
- Type::flag_type lhs, Type::flag_type rhs) { \
- return Type(lhs) & rhs; \
- } \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator&( \
- Type::flag_type lhs, const Type& rhs) { \
- return rhs & lhs; \
- } \
- ALLOW_UNUSED_TYPE inline void operator&(Type::flag_type lhs, \
- Type::mask_type rhs) {} \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator|( \
- Type::flag_type lhs, Type::flag_type rhs) { \
- return Type(lhs) | rhs; \
- } \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator|( \
- Type::flag_type lhs, const Type& rhs) { \
- return rhs | lhs; \
- } \
- ALLOW_UNUSED_TYPE inline void operator|(Type::flag_type lhs, \
- Type::mask_type rhs) {} \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator^( \
- Type::flag_type lhs, Type::flag_type rhs) { \
- return Type(lhs) ^ rhs; \
- } \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator^( \
- Type::flag_type lhs, const Type& rhs) { \
- return rhs ^ lhs; \
- } \
- ALLOW_UNUSED_TYPE inline void operator^(Type::flag_type lhs, \
- Type::mask_type rhs) {} \
- ALLOW_UNUSED_TYPE inline constexpr Type operator~(Type::flag_type val) { \
- return ~Type(val); \
+#define DEFINE_OPERATORS_FOR_FLAGS(Type) \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator&( \
+ Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) & rhs; \
+ } \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator&( \
+ Type::flag_type lhs, const Type& rhs) { \
+ return rhs & lhs; \
+ } \
+ V8_ALLOW_UNUSED inline void operator&(Type::flag_type lhs, \
+ Type::mask_type rhs) {} \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator|( \
+ Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) | rhs; \
+ } \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator|( \
+ Type::flag_type lhs, const Type& rhs) { \
+ return rhs | lhs; \
+ } \
+ V8_ALLOW_UNUSED inline void operator|(Type::flag_type lhs, \
+ Type::mask_type rhs) {} \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator^( \
+ Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) ^ rhs; \
+ } \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator^( \
+ Type::flag_type lhs, const Type& rhs) { \
+ return rhs ^ lhs; \
+ } \
+ V8_ALLOW_UNUSED inline void operator^(Type::flag_type lhs, \
+ Type::mask_type rhs) {} \
+ V8_ALLOW_UNUSED inline constexpr Type operator~(Type::flag_type val) { \
+ return ~Type(val); \
}
} // namespace base
diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h
index 77e9bb896e..31fe9a972c 100644
--- a/deps/v8/src/base/optional.h
+++ b/deps/v8/src/base/optional.h
@@ -35,7 +35,7 @@ constexpr in_place_t in_place = {};
// http://en.cppreference.com/w/cpp/utility/optional/nullopt
constexpr nullopt_t nullopt(0);
-// Forward declaration, which is refered by following helpers.
+// Forward declaration, which is referred to by the following helpers.
template <typename T>
class Optional;
diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc
index 1438c88337..2956bf1475 100644
--- a/deps/v8/src/base/page-allocator.cc
+++ b/deps/v8/src/base/page-allocator.cc
@@ -151,5 +151,9 @@ bool PageAllocator::DiscardSystemPages(void* address, size_t size) {
return base::OS::DiscardSystemPages(address, size);
}
+bool PageAllocator::DecommitPages(void* address, size_t size) {
+ return base::OS::DecommitPages(address, size);
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/page-allocator.h b/deps/v8/src/base/page-allocator.h
index a98f084790..7374c67837 100644
--- a/deps/v8/src/base/page-allocator.h
+++ b/deps/v8/src/base/page-allocator.h
@@ -47,6 +47,8 @@ class V8_BASE_EXPORT PageAllocator
bool DiscardSystemPages(void* address, size_t size) override;
+ bool DecommitPages(void* address, size_t size) override;
+
private:
friend class v8::base::SharedMemory;
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index bd0000c4a1..c51012c3f1 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -133,6 +133,11 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
return status == ZX_OK;
}
+bool OS::DecommitPages(void* address, size_t size) {
+ // TODO(chromium:1218005): support this.
+ return false;
+}
+
// static
bool OS::HasLazyCommits() {
// TODO(scottmg): Port, https://crbug.com/731217.
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 179a17cc0f..f05f22c913 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -341,6 +341,10 @@ void* OS::GetRandomMmapAddr() {
// TODO(RISCV): We need more information from the kernel to correctly mask
// this address for RISC-V. https://github.com/v8-riscv/v8/issues/375
raw_addr &= uint64_t{0xFFFFFF0000};
+#elif V8_TARGET_ARCH_LOONG64
+  // 42 bits of virtual addressing. Truncate to 40 bits to give the kernel a
+  // chance to fulfill the request.
+ raw_addr &= uint64_t{0xFFFFFF0000};
#else
raw_addr &= 0x3FFFF000;
@@ -491,6 +495,20 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
return ret == 0;
}
+bool OS::DecommitPages(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ // From https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html:
+ // "If a MAP_FIXED request is successful, then any previous mappings [...] for
+ // those whole pages containing any part of the address range [pa,pa+len)
+ // shall be removed, as if by an appropriate call to munmap(), before the new
+ // mapping is established." As a consequence, the memory will be
+ // zero-initialized on next access.
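+  // In other words, the range stays reserved (still mapped, but inaccessible),
+  // and re-committing and touching it later yields zero-filled pages rather
+  // than the previous contents.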
+ void* ptr = mmap(address, size, PROT_NONE,
+ MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ return ptr == address;
+}
+
// static
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
@@ -530,6 +548,8 @@ void OS::DebugBreak() {
asm("break");
#elif V8_HOST_ARCH_MIPS64
asm("break");
+#elif V8_HOST_ARCH_LOONG64
+ asm("break 0");
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
asm("twge 2,2");
#elif V8_HOST_ARCH_IA32
@@ -566,25 +586,29 @@ class PosixMemoryMappedFile final : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name,
FileMode mode) {
const char* fopen_mode = (mode == FileMode::kReadOnly) ? "r" : "r+";
- if (FILE* file = fopen(name, fopen_mode)) {
- if (fseek(file, 0, SEEK_END) == 0) {
- long size = ftell(file); // NOLINT(runtime/int)
- if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
- if (size > 0) {
- int prot = PROT_READ;
- int flags = MAP_PRIVATE;
- if (mode == FileMode::kReadWrite) {
- prot |= PROT_WRITE;
- flags = MAP_SHARED;
- }
- void* const memory =
- mmap(OS::GetRandomMmapAddr(), size, prot, flags, fileno(file), 0);
- if (memory != MAP_FAILED) {
- return new PosixMemoryMappedFile(file, memory, size);
+ struct stat statbuf;
+ // Make sure path exists and is not a directory.
+ if (stat(name, &statbuf) == 0 && !S_ISDIR(statbuf.st_mode)) {
+ if (FILE* file = fopen(name, fopen_mode)) {
+ if (fseek(file, 0, SEEK_END) == 0) {
+ long size = ftell(file); // NOLINT(runtime/int)
+ if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
+ if (size > 0) {
+ int prot = PROT_READ;
+ int flags = MAP_PRIVATE;
+ if (mode == FileMode::kReadWrite) {
+ prot |= PROT_WRITE;
+ flags = MAP_SHARED;
+ }
+ void* const memory =
+ mmap(OS::GetRandomMmapAddr(), size, prot, flags, fileno(file), 0);
+ if (memory != MAP_FAILED) {
+ return new PosixMemoryMappedFile(file, memory, size);
+ }
}
}
+ fclose(file);
}
- fclose(file);
}
return nullptr;
}
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 79c1aa06ce..6b5c5df496 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -935,6 +935,21 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
}
// static
+bool OS::DecommitPages(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualfree:
+ // "If a page is decommitted but not released, its state changes to reserved.
+ // Subsequently, you can call VirtualAlloc to commit it, or VirtualFree to
+ // release it. Attempts to read from or write to a reserved page results in an
+ // access violation exception."
+ // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc
+ // for MEM_COMMIT: "The function also guarantees that when the caller later
+ // initially accesses the memory, the contents will be zero."
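+  // Hence decommitted pages stay reserved but are inaccessible until they are
+  // committed again, at which point they read as zero.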
+ return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+}
+
+// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index d196578342..2e7ad32974 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -311,6 +311,8 @@ class V8_BASE_EXPORT OS {
V8_WARN_UNUSED_RESULT static bool DiscardSystemPages(void* address,
size_t size);
+ V8_WARN_UNUSED_RESULT static bool DecommitPages(void* address, size_t size);
+
static const int msPerSecond = 1000;
#if V8_OS_POSIX
diff --git a/deps/v8/src/base/region-allocator.cc b/deps/v8/src/base/region-allocator.cc
index 9224dc99dc..53932d2864 100644
--- a/deps/v8/src/base/region-allocator.cc
+++ b/deps/v8/src/base/region-allocator.cc
@@ -200,6 +200,35 @@ bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size,
return true;
}
+RegionAllocator::Address RegionAllocator::AllocateAlignedRegion(
+ size_t size, size_t alignment) {
+ DCHECK(IsAligned(size, page_size_));
+ DCHECK(IsAligned(alignment, page_size_));
+ DCHECK_GE(alignment, page_size_);
+
+ const size_t padded_size = size + alignment - page_size_;
+ Region* region = FreeListFindRegion(padded_size);
+ if (region == nullptr) return kAllocationFailure;
+
+ if (!IsAligned(region->begin(), alignment)) {
+ size_t start = RoundUp(region->begin(), alignment);
+ region = Split(region, start - region->begin());
+ DCHECK_EQ(region->begin(), start);
+ DCHECK(IsAligned(region->begin(), alignment));
+ }
+
+ if (region->size() != size) {
+ Split(region, size);
+ }
+ DCHECK(IsAligned(region->begin(), alignment));
+ DCHECK_EQ(region->size(), size);
+
+ // Mark region as used.
+ FreeListRemoveRegion(region);
+ region->set_state(RegionState::kAllocated);
+ return region->begin();
+}
+
size_t RegionAllocator::TrimRegion(Address address, size_t new_size) {
DCHECK(IsAligned(new_size, page_size_));
diff --git a/deps/v8/src/base/region-allocator.h b/deps/v8/src/base/region-allocator.h
index adc4bd10b6..f80524870f 100644
--- a/deps/v8/src/base/region-allocator.h
+++ b/deps/v8/src/base/region-allocator.h
@@ -61,6 +61,11 @@ class V8_BASE_EXPORT RegionAllocator final {
bool AllocateRegionAt(Address requested_address, size_t size,
RegionState region_state = RegionState::kAllocated);
+  // Allocates a region of |size| aligned to |alignment|. Both |size| and
+  // |alignment| must be multiples of |page_size|. Returns the address of the
+  // region on success or kAllocationFailure.
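+  // For example, with a 4 KB |page_size|, AllocateAlignedRegion(64 KB, 2 MB)
+  // looks for a free 2 MB + 60 KB region and returns the first 2 MB-aligned
+  // 64 KB block inside it.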
+ Address AllocateAlignedRegion(size_t size, size_t alignment);
+
// Frees region at given |address|, returns the size of the region.
// There must be a used region starting at given address otherwise nothing
// will be freed and 0 will be returned.
diff --git a/deps/v8/src/base/sanitizer/asan.h b/deps/v8/src/base/sanitizer/asan.h
index 82f03aa258..6466fc6163 100644
--- a/deps/v8/src/base/sanitizer/asan.h
+++ b/deps/v8/src/base/sanitizer/asan.h
@@ -24,8 +24,9 @@
// Check that all bytes in a memory region are poisoned. This is different from
// `__asan_region_is_poisoned()` which only requires a single byte in the region
-// to be poisoned.
-#define ASAN_CHECK_MEMORY_REGION_IS_POISONED(start, size) \
+// to be poisoned. Note that the macro only works if both start and size are
+// multiples of ASan's shadow memory granularity.
+#define ASAN_CHECK_WHOLE_MEMORY_REGION_IS_POISONED(start, size) \
do { \
for (size_t i = 0; i < size; i++) { \
CHECK(__asan_address_is_poisoned(reinterpret_cast<const char*>(start) + \
@@ -47,7 +48,7 @@
#define ASAN_UNPOISON_MEMORY_REGION(start, size) \
ASAN_POISON_MEMORY_REGION(start, size)
-#define ASAN_CHECK_MEMORY_REGION_IS_POISONED(start, size) \
+#define ASAN_CHECK_WHOLE_MEMORY_REGION_IS_POISONED(start, size) \
ASAN_POISON_MEMORY_REGION(start, size)
#endif // !V8_USE_ADDRESS_SANITIZER
diff --git a/deps/v8/src/base/sanitizer/tsan.h b/deps/v8/src/base/sanitizer/tsan.h
new file mode 100644
index 0000000000..854c82eb22
--- /dev/null
+++ b/deps/v8/src/base/sanitizer/tsan.h
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ThreadSanitizer support.
+
+#ifndef V8_BASE_SANITIZER_TSAN_H_
+#define V8_BASE_SANITIZER_TSAN_H_
+
+#if defined(THREAD_SANITIZER)
+
+#define DISABLE_TSAN __attribute__((no_sanitize_thread))
+
+#else // !defined(THREAD_SANITIZER)
+
+#define DISABLE_TSAN
+
+#endif // !defined(THREAD_SANITIZER)
+
+#endif // V8_BASE_SANITIZER_TSAN_H_
diff --git a/deps/v8/src/base/win32-headers.h b/deps/v8/src/base/win32-headers.h
index e4e845d86d..95aedd8c95 100644
--- a/deps/v8/src/base/win32-headers.h
+++ b/deps/v8/src/base/win32-headers.h
@@ -41,12 +41,6 @@
#include <signal.h> // For raise().
#include <time.h> // For LocalOffset() implementation.
-#ifdef __MINGW32__
-// Require Windows XP or higher when compiling with MinGW. This is for MinGW
-// header files to expose getaddrinfo.
-#undef _WIN32_WINNT
-#define _WIN32_WINNT 0x501
-#endif // __MINGW32__
#if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
#include <errno.h> // For STRUNCATE
#endif // !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
diff --git a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
index 040761091a..db3c05ce18 100644
--- a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
+++ b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
@@ -501,14 +501,21 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->add(params_size, params_size,
- Operand(1)); // Include the receiver.
- __ masm()->Drop(params_size);
+ __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ masm()->Ret();
}
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->cmp(reg, kInterpreterAccumulatorRegister);
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue);
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
index cda2108327..7824f92c2a 100644
--- a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
+++ b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
@@ -583,13 +583,21 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->Add(params_size, params_size, 1); // Include the receiver.
- __ masm()->DropArguments(params_size);
+ __ masm()->DropArguments(params_size,
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ masm()->Ret();
}
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->CmpTagged(reg, kInterpreterAccumulatorRegister);
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue);
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/baseline/baseline-assembler-inl.h b/deps/v8/src/baseline/baseline-assembler-inl.h
index 83c102176f..583db7e679 100644
--- a/deps/v8/src/baseline/baseline-assembler-inl.h
+++ b/deps/v8/src/baseline/baseline-assembler-inl.h
@@ -34,6 +34,8 @@
#include "src/baseline/mips64/baseline-assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/baseline/mips/baseline-assembler-mips-inl.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/baseline/loong64/baseline-assembler-loong64-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -135,6 +137,24 @@ SaveAccumulatorScope::~SaveAccumulatorScope() {
assembler_->Pop(kInterpreterAccumulatorRegister);
}
+EnsureAccumulatorPreservedScope::EnsureAccumulatorPreservedScope(
+ BaselineAssembler* assembler)
+ : assembler_(assembler)
+#ifdef V8_CODE_COMMENTS
+ ,
+ comment_(assembler->masm(), "EnsureAccumulatorPreservedScope")
+#endif
+{
+ assembler_->Push(kInterpreterAccumulatorRegister);
+}
+
+EnsureAccumulatorPreservedScope::~EnsureAccumulatorPreservedScope() {
+ BaselineAssembler::ScratchRegisterScope scratch(assembler_);
+ Register reg = scratch.AcquireScratch();
+ assembler_->Pop(reg);
+ AssertEqualToAccumulator(reg);
+}
+
#undef __
} // namespace baseline
diff --git a/deps/v8/src/baseline/baseline-assembler.h b/deps/v8/src/baseline/baseline-assembler.h
index e1063ff2b2..b8c876a8d3 100644
--- a/deps/v8/src/baseline/baseline-assembler.h
+++ b/deps/v8/src/baseline/baseline-assembler.h
@@ -202,6 +202,21 @@ class SaveAccumulatorScope final {
BaselineAssembler* assembler_;
};
+class EnsureAccumulatorPreservedScope final {
+ public:
+ inline explicit EnsureAccumulatorPreservedScope(BaselineAssembler* assembler);
+
+ inline ~EnsureAccumulatorPreservedScope();
+
+ private:
+ inline void AssertEqualToAccumulator(Register reg);
+
+ BaselineAssembler* assembler_;
+#ifdef V8_CODE_COMMENTS
+ Assembler::CodeComment comment_;
+#endif
+};
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/baseline/baseline-batch-compiler.cc b/deps/v8/src/baseline/baseline-batch-compiler.cc
index 6a25df7264..fb66139a31 100644
--- a/deps/v8/src/baseline/baseline-batch-compiler.cc
+++ b/deps/v8/src/baseline/baseline-batch-compiler.cc
@@ -6,9 +6,8 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+#include "src/flags/flags.h"
+#if ENABLE_SPARKPLUG
#include "src/baseline/baseline-compiler.h"
#include "src/codegen/compiler.h"
@@ -40,7 +39,7 @@ bool BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) {
Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
// Early return if the function is compiled with baseline already or it is not
// suitable for baseline compilation.
- if (shared->HasBaselineData()) return true;
+ if (shared->HasBaselineCode()) return true;
if (!CanCompileWithBaseline(isolate_, *shared)) return false;
// Immediately compile the function if batch compilation is disabled.
diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc
index f30812c85a..63d684e733 100644
--- a/deps/v8/src/baseline/baseline-compiler.cc
+++ b/deps/v8/src/baseline/baseline-compiler.cc
@@ -48,6 +48,8 @@
#include "src/baseline/mips64/baseline-compiler-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/baseline/mips/baseline-compiler-mips-inl.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/baseline/loong64/baseline-compiler-loong64-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -321,9 +323,16 @@ MaybeHandle<Code> BaselineCompiler::Build(Isolate* isolate) {
// Allocate the bytecode offset table.
Handle<ByteArray> bytecode_offset_table =
bytecode_offset_table_builder_.ToBytecodeOffsetTable(isolate);
- return Factory::CodeBuilder(isolate, desc, CodeKind::BASELINE)
- .set_bytecode_offset_table(bytecode_offset_table)
- .TryBuild();
+
+ Factory::CodeBuilder code_builder(isolate, desc, CodeKind::BASELINE);
+ code_builder.set_bytecode_offset_table(bytecode_offset_table);
+ if (shared_function_info_->HasInterpreterData()) {
+ code_builder.set_interpreter_data(
+ handle(shared_function_info_->interpreter_data(), isolate));
+ } else {
+ code_builder.set_interpreter_data(bytecode_);
+ }
+ return code_builder.TryBuild();
}
int BaselineCompiler::EstimateInstructionSize(BytecodeArray bytecode) {
@@ -488,13 +497,31 @@ void BaselineCompiler::VisitSingleBytecode() {
TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif
- switch (iterator().current_bytecode()) {
+ {
+ interpreter::Bytecode bytecode = iterator().current_bytecode();
+
+#ifdef DEBUG
+ base::Optional<EnsureAccumulatorPreservedScope> accumulator_preserved_scope;
+ // We should make sure to preserve the accumulator whenever the bytecode
+ // isn't registered as writing to it. We can't do this for jumps or switches
+ // though, since the control flow would not match the control flow of this
+ // scope.
+ if (FLAG_debug_code &&
+ !interpreter::Bytecodes::WritesAccumulator(bytecode) &&
+ !interpreter::Bytecodes::IsJump(bytecode) &&
+ !interpreter::Bytecodes::IsSwitch(bytecode)) {
+ accumulator_preserved_scope.emplace(&basm_);
+ }
+#endif // DEBUG
+
+ switch (bytecode) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
Visit##name(); \
break;
- BYTECODE_LIST(BYTECODE_CASE)
+ BYTECODE_LIST(BYTECODE_CASE)
#undef BYTECODE_CASE
+ }
}
#ifdef V8_TRACE_UNOPTIMIZED
@@ -1173,53 +1200,57 @@ void BaselineCompiler::BuildCall(uint32_t slot, uint32_t arg_count,
void BaselineCompiler::VisitCallAnyReceiver() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count() - 1; // Remove receiver.
+ uint32_t arg_count = args.register_count();
+ if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver.
BuildCall<ConvertReceiverMode::kAny>(Index(3), arg_count, args);
}
void BaselineCompiler::VisitCallProperty() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count() - 1; // Remove receiver.
+ uint32_t arg_count = args.register_count();
+ if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver.
BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(Index(3), arg_count,
args);
}
void BaselineCompiler::VisitCallProperty0() {
- BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(Index(2), 0,
- RegisterOperand(1));
+ BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(
+ Index(2), JSParameterCount(0), RegisterOperand(1));
}
void BaselineCompiler::VisitCallProperty1() {
BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(
- Index(3), 1, RegisterOperand(1), RegisterOperand(2));
+ Index(3), JSParameterCount(1), RegisterOperand(1), RegisterOperand(2));
}
void BaselineCompiler::VisitCallProperty2() {
BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(
- Index(4), 2, RegisterOperand(1), RegisterOperand(2), RegisterOperand(3));
+ Index(4), JSParameterCount(2), RegisterOperand(1), RegisterOperand(2),
+ RegisterOperand(3));
}
void BaselineCompiler::VisitCallUndefinedReceiver() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count();
+ uint32_t arg_count = JSParameterCount(args.register_count());
BuildCall<ConvertReceiverMode::kNullOrUndefined>(
Index(3), arg_count, RootIndex::kUndefinedValue, args);
}
void BaselineCompiler::VisitCallUndefinedReceiver0() {
- BuildCall<ConvertReceiverMode::kNullOrUndefined>(Index(1), 0,
- RootIndex::kUndefinedValue);
+ BuildCall<ConvertReceiverMode::kNullOrUndefined>(
+ Index(1), JSParameterCount(0), RootIndex::kUndefinedValue);
}
void BaselineCompiler::VisitCallUndefinedReceiver1() {
BuildCall<ConvertReceiverMode::kNullOrUndefined>(
- Index(2), 1, RootIndex::kUndefinedValue, RegisterOperand(1));
+ Index(2), JSParameterCount(1), RootIndex::kUndefinedValue,
+ RegisterOperand(1));
}
void BaselineCompiler::VisitCallUndefinedReceiver2() {
BuildCall<ConvertReceiverMode::kNullOrUndefined>(
- Index(3), 2, RootIndex::kUndefinedValue, RegisterOperand(1),
- RegisterOperand(2));
+ Index(3), JSParameterCount(2), RootIndex::kUndefinedValue,
+ RegisterOperand(1), RegisterOperand(2));
}
void BaselineCompiler::VisitCallWithSpread() {
@@ -1229,7 +1260,8 @@ void BaselineCompiler::VisitCallWithSpread() {
interpreter::Register spread_register = args.last_register();
args = args.Truncate(args.register_count() - 1);
- uint32_t arg_count = args.register_count() - 1; // Remove receiver.
+ uint32_t arg_count = args.register_count();
+ if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver.
CallBuiltin<Builtin::kCallWithSpread_Baseline>(
RegisterOperand(0), // kFunction
@@ -1253,7 +1285,7 @@ void BaselineCompiler::VisitCallRuntimeForPair() {
void BaselineCompiler::VisitCallJSRuntime() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count();
+ uint32_t arg_count = JSParameterCount(args.register_count());
// Load context for LoadNativeContextSlot.
__ LoadContext(kContextRegister);
@@ -1376,7 +1408,7 @@ void BaselineCompiler::VisitIntrinsicAsyncGeneratorYield(
void BaselineCompiler::VisitConstruct() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count();
+ uint32_t arg_count = JSParameterCount(args.register_count());
CallBuiltin<Builtin::kConstruct_Baseline>(
RegisterOperand(0), // kFunction
kInterpreterAccumulatorRegister, // kNewTarget
@@ -1393,7 +1425,7 @@ void BaselineCompiler::VisitConstructWithSpread() {
interpreter::Register spread_register = args.last_register();
args = args.Truncate(args.register_count() - 1);
- uint32_t arg_count = args.register_count();
+ uint32_t arg_count = JSParameterCount(args.register_count());
using Descriptor =
CallInterfaceDescriptorFor<Builtin::kConstructWithSpread_Baseline>::type;
@@ -2079,13 +2111,15 @@ void BaselineCompiler::VisitReturn() {
iterator().current_bytecode_size_without_prefix();
int parameter_count = bytecode_->parameter_count();
- // We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_count).
- int parameter_count_without_receiver =
- parameter_count - 1; // Exclude the receiver to simplify the
- // computation. We'll account for it at the end.
- TailCallBuiltin<Builtin::kBaselineLeaveFrame>(
- parameter_count_without_receiver, -profiling_weight);
+ if (kJSArgcIncludesReceiver) {
+ TailCallBuiltin<Builtin::kBaselineLeaveFrame>(parameter_count,
+ -profiling_weight);
+
+ } else {
+ int parameter_count_without_receiver = parameter_count - 1;
+ TailCallBuiltin<Builtin::kBaselineLeaveFrame>(
+ parameter_count_without_receiver, -profiling_weight);
+ }
}
void BaselineCompiler::VisitThrowReferenceErrorIfHole() {
diff --git a/deps/v8/src/baseline/baseline-compiler.h b/deps/v8/src/baseline/baseline-compiler.h
index d8cd9ac5c6..341e7c0822 100644
--- a/deps/v8/src/baseline/baseline-compiler.h
+++ b/deps/v8/src/baseline/baseline-compiler.h
@@ -162,6 +162,7 @@ class BaselineCompiler {
LocalIsolate* local_isolate_;
RuntimeCallStats* stats_;
Handle<SharedFunctionInfo> shared_function_info_;
+ Handle<HeapObject> interpreter_data_;
Handle<BytecodeArray> bytecode_;
MacroAssembler masm_;
BaselineAssembler basm_;
diff --git a/deps/v8/src/baseline/baseline.cc b/deps/v8/src/baseline/baseline.cc
index cec0805aec..764d2db645 100644
--- a/deps/v8/src/baseline/baseline.cc
+++ b/deps/v8/src/baseline/baseline.cc
@@ -43,6 +43,13 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
// Functions with breakpoints have to stay interpreted.
if (shared.HasBreakInfo()) return false;
+ // Functions with instrumented bytecode can't be baseline compiled since the
+ // baseline code's bytecode array pointer is immutable.
+ if (shared.HasDebugInfo() &&
+ shared.GetDebugInfo().HasInstrumentedBytecodeArray()) {
+ return false;
+ }
+
// Do not baseline compile if function doesn't pass sparkplug_filter.
if (!shared.PassesFilter(FLAG_sparkplug_filter)) return false;
diff --git a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
index e3f991886d..e280bee3da 100644
--- a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
+++ b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
@@ -457,16 +457,21 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- Register return_pc = scratch;
- __ masm()->PopReturnAddressTo(return_pc);
- __ masm()->lea(esp, MemOperand(esp, params_size, times_system_pointer_size,
- kSystemPointerSize));
- __ masm()->PushReturnAddressFrom(return_pc);
+ __ masm()->DropArguments(
+ params_size, scratch, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ masm()->Ret();
}
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->cmp(reg, kInterpreterAccumulatorRegister);
+ assembler_->masm()->Assert(equal, AbortReason::kUnexpectedValue);
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
new file mode 100644
index 0000000000..059d932ef9
--- /dev/null
+++ b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
@@ -0,0 +1,503 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_
+#define V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/loong64/assembler-loong64-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+class BaselineAssembler::ScratchRegisterScope {
+ public:
+ explicit ScratchRegisterScope(BaselineAssembler* assembler)
+ : assembler_(assembler),
+ prev_scope_(assembler->scratch_register_scope_),
+ wrapped_scope_(assembler->masm()) {
+ if (!assembler_->scratch_register_scope_) {
+ // If we haven't opened a scratch scope yet, for the first one add a
+ // couple of extra registers.
+ wrapped_scope_.Include(t0.bit() | t1.bit() | t2.bit() | t3.bit());
+ }
+ assembler_->scratch_register_scope_ = this;
+ }
+ ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
+
+ Register AcquireScratch() { return wrapped_scope_.Acquire(); }
+
+ private:
+ BaselineAssembler* assembler_;
+ ScratchRegisterScope* prev_scope_;
+ UseScratchRegisterScope wrapped_scope_;
+};
+
+enum class Condition : uint32_t {
+ kEqual = eq,
+ kNotEqual = ne,
+
+ kLessThan = lt,
+ kGreaterThan = gt,
+ kLessThanEqual = le,
+ kGreaterThanEqual = ge,
+
+ kUnsignedLessThan = Uless,
+ kUnsignedGreaterThan = Ugreater,
+ kUnsignedLessThanEqual = Uless_equal,
+ kUnsignedGreaterThanEqual = Ugreater_equal,
+
+ kOverflow = overflow,
+ kNoOverflow = no_overflow,
+
+ kZero = eq,
+ kNotZero = ne,
+};
+
+inline internal::Condition AsMasmCondition(Condition cond) {
+ STATIC_ASSERT(sizeof(internal::Condition) == sizeof(Condition));
+ return static_cast<internal::Condition>(cond);
+}
+
+namespace detail {
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+ return op.base() == target || op.index() == target;
+}
+#endif
+
+} // namespace detail
+
+#define __ masm_->
+
+MemOperand BaselineAssembler::RegisterFrameOperand(
+ interpreter::Register interpreter_register) {
+ return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
+}
+MemOperand BaselineAssembler::FeedbackVectorOperand() {
+ return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
+}
+
+void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
+void BaselineAssembler::JumpTarget() {
+ // NOP.
+}
+void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+ __ Branch(target);
+}
+void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfNotRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfSmi(Register value, Label* target,
+ Label::Distance) {
+ __ JumpIfSmi(value, target);
+}
+void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
+ Label::Distance) {
+ __ JumpIfNotSmi(value, target);
+}
+
+void BaselineAssembler::CallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(masm_,
+ __ CommentForOffHeapTrampoline("call", builtin));
+ Register temp = t7;
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Call(temp);
+}
+
+void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(masm_,
+ __ CommentForOffHeapTrampoline("tail call", builtin));
+ Register temp = t7;
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Jump(temp);
+}
+
+void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
+ Label* target, Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ And(scratch, value, Operand(mask));
+ __ Branch(target, AsMasmCondition(cc), scratch, Operand(zero_reg));
+}
+
+void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
+ Label* target, Label::Distance) {
+ __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
+}
+void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
+ InstanceType instance_type,
+ Register map, Label* target,
+ Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ __ GetObjectType(object, map, type);
+ __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
+}
+void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
+ InstanceType instance_type,
+ Label* target, Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(map);
+ __ GetObjectType(map, type, type);
+ __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
+ }
+ __ Ld_d(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
+}
+void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
+ Label* target, Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ li(scratch, Operand(smi));
+ __ SmiUntag(scratch);
+ __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
+}
+void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
+ Label* target, Label::Distance) {
+ __ AssertSmi(lhs);
+ __ AssertSmi(rhs);
+ __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
+}
+void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
+ MemOperand operand, Label* target,
+ Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ Ld_d(scratch, operand);
+ __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
+}
+void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
+ Register value, Label* target,
+ Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ Ld_d(scratch, operand);
+ __ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
+}
+void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
+ Label* target, Label::Distance) {
+ __ Branch(target, AsMasmCondition(cc), value, Operand(byte));
+}
+void BaselineAssembler::Move(interpreter::Register output, Register source) {
+ Move(RegisterFrameOperand(output), source);
+}
+void BaselineAssembler::Move(Register output, TaggedIndex value) {
+ __ li(output, Operand(value.ptr()));
+}
+void BaselineAssembler::Move(MemOperand output, Register source) {
+ __ St_d(source, output);
+}
+void BaselineAssembler::Move(Register output, ExternalReference reference) {
+ __ li(output, Operand(reference));
+}
+void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+ __ li(output, Operand(value));
+}
+void BaselineAssembler::Move(Register output, int32_t value) {
+ __ li(output, Operand(value));
+}
+void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+ __ Move(output, source);
+}
+void BaselineAssembler::MoveSmi(Register output, Register source) {
+ __ Move(output, source);
+}
+
+namespace detail {
+
+template <typename Arg>
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Arg arg) {
+ Register reg = scope->AcquireScratch();
+ basm->Move(reg, arg);
+ return reg;
+}
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Register reg) {
+ return reg;
+}
+
+template <typename... Args>
+struct PushAllHelper;
+template <>
+struct PushAllHelper<> {
+ static int Push(BaselineAssembler* basm) { return 0; }
+ static int PushReverse(BaselineAssembler* basm) { return 0; }
+};
+// TODO(ishell): try to pack a sequence of pushes into one instruction by
+// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
+// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
+template <typename Arg>
+struct PushAllHelper<Arg> {
+ static int Push(BaselineAssembler* basm, Arg arg) {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg));
+ return 1;
+ }
+ static int PushReverse(BaselineAssembler* basm, Arg arg) {
+ return Push(basm, arg);
+ }
+};
+// TODO(ishell): try to pack a sequence of pushes into one instruction by
+// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
+// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+ static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
+ PushAllHelper<Arg>::Push(basm, arg);
+ return 1 + PushAllHelper<Args...>::Push(basm, args...);
+ }
+ static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
+ int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
+ PushAllHelper<Arg>::Push(basm, arg);
+ return nargs + 1;
+ }
+};
+
+template <>
+struct PushAllHelper<interpreter::RegisterList> {
+ static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+ for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+ PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
+ }
+ return list.register_count();
+ }
+ static int PushReverse(BaselineAssembler* basm,
+ interpreter::RegisterList list) {
+ for (int reg_index = list.register_count() - 1; reg_index >= 0;
+ --reg_index) {
+ PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
+ }
+ return list.register_count();
+ }
+};
+
+template <typename... T>
+struct PopAllHelper;
+template <>
+struct PopAllHelper<> {
+ static void Pop(BaselineAssembler* basm) {}
+};
+// TODO(ishell): try to pack a sequence of pops into one instruction by
+// looking at register codes. For example, Pop(r1, r2, r5, r0, r3, r4)
+// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
+template <>
+struct PopAllHelper<Register> {
+ static void Pop(BaselineAssembler* basm, Register reg) {
+ basm->masm()->Pop(reg);
+ }
+};
+template <typename... T>
+struct PopAllHelper<Register, T...> {
+ static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
+ PopAllHelper<Register>::Pop(basm, reg);
+ PopAllHelper<T...>::Pop(basm, tail...);
+ }
+};
+
+} // namespace detail
+
+template <typename... T>
+int BaselineAssembler::Push(T... vals) {
+ return detail::PushAllHelper<T...>::Push(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::PushReverse(T... vals) {
+ detail::PushAllHelper<T...>::PushReverse(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::Pop(T... registers) {
+ detail::PopAllHelper<T...>::Pop(this, registers...);
+}
+
+void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+ int offset) {
+ __ Ld_d(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
+ int offset) {
+ __ Ld_d(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+ int offset) {
+ __ Ld_d(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadByteField(Register output, Register source,
+ int offset) {
+ __ Ld_b(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
+ Smi value) {
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ li(scratch, Operand(value));
+ __ St_d(scratch, FieldMemOperand(target, offset));
+}
+void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
+ int offset,
+ Register value) {
+ ASM_CODE_COMMENT(masm_);
+ __ St_d(value, FieldMemOperand(target, offset));
+ ScratchRegisterScope temps(this);
+ __ RecordWriteField(target, offset, value, kRAHasNotBeenSaved,
+ SaveFPRegsMode::kIgnore);
+}
+void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
+ int offset,
+ Register value) {
+ __ St_d(value, FieldMemOperand(target, offset));
+}
+void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
+ int32_t weight, Label* skip_interrupt_label) {
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch();
+ __ Ld_w(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ __ Add_w(interrupt_budget, interrupt_budget, weight);
+ __ St_w(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ if (skip_interrupt_label) {
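+    // A skip label is only provided for negative weights, i.e. when the
+    // budget is being consumed; keep skipping the interrupt while the
+    // remaining budget is still >= 0.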
+ DCHECK_LT(weight, 0);
+ __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
+ }
+}
+void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
+ Register weight, Label* skip_interrupt_label) {
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch();
+ __ Ld_w(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ __ Add_w(interrupt_budget, interrupt_budget, weight);
+ __ St_w(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ if (skip_interrupt_label)
+ __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
+}
+
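The two AddToInterruptBudgetAndJumpIfNotExceeded overloads above implement the usual tiering heuristic: a per-function budget is adjusted by a (typically negative) weight, and only when it drops below zero does control fall through to the expensive runtime call. A minimal standalone sketch of that control flow follows; it is not V8 code, and the reset value is a made-up placeholder.

// Toy model of the interrupt-budget check; not V8 code.
#include <cstdint>
#include <iostream>

struct FeedbackCell {
  int32_t interrupt_budget;
};

// Returns true when the budget is exhausted and the runtime would be called.
bool AddToInterruptBudgetAndCheck(FeedbackCell* cell, int32_t weight) {
  cell->interrupt_budget += weight;  // weight is typically negative
  if (cell->interrupt_budget >= 0) {
    return false;  // equivalent to branching to skip_interrupt_label
  }
  // Budget exhausted: a real engine would call into the runtime here
  // (e.g. to consider tiering up) and then reset the budget.
  cell->interrupt_budget = 128 * 1024;  // hypothetical reset value
  return true;
}

int main() {
  FeedbackCell cell{1000};
  int runtime_calls = 0;
  for (int i = 0; i < 50; ++i) {
    if (AddToInterruptBudgetAndCheck(&cell, -100)) ++runtime_calls;
  }
  std::cout << "runtime calls: " << runtime_calls << "\n";
}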
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
+ __ Add_d(lhs, lhs, Operand(rhs));
+}
+
+void BaselineAssembler::Switch(Register reg, int case_value_base,
+ Label** labels, int num_labels) {
+ ASM_CODE_COMMENT(masm_);
+ Label fallthrough;
+ if (case_value_base > 0) {
+ __ Sub_d(reg, reg, Operand(case_value_base));
+ }
+
+ ScratchRegisterScope scope(this);
+ Register scratch = scope.AcquireScratch();
+ __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
+ reg, Operand(num_labels));
+ int entry_size_log2 = 2;
+ __ pcaddi(scratch, 3);
+ __ Alsl_d(scratch, reg, scratch, entry_size_log2);
+ __ Jump(scratch);
+ {
+ TurboAssembler::BlockTrampolinePoolScope(masm());
+ __ BlockTrampolinePoolFor(num_labels * kInstrSize);
+ for (int i = 0; i < num_labels; ++i) {
+ __ Branch(labels[i]);
+ }
+ __ bind(&fallthrough);
+ }
+}
+
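The Switch helper above emits a bounds check followed by a computed jump into a dense table of branch instructions. Below is a standalone sketch of the same dispatch shape, with ordinary function pointers standing in for the branch table; it is illustrative only and not part of the patch.

// Toy model of bounds-checked jump-table dispatch; not V8 code.
#include <cstdio>

void Case0() { std::puts("case 0"); }
void Case1() { std::puts("case 1"); }
void Case2() { std::puts("case 2"); }
void Fallthrough() { std::puts("fallthrough"); }

void Switch(unsigned value, int case_value_base) {
  using Handler = void (*)();
  static const Handler table[] = {Case0, Case1, Case2};
  const unsigned num_labels = sizeof(table) / sizeof(table[0]);
  unsigned index = value - static_cast<unsigned>(case_value_base);
  // An unsigned compare doubles as a "< 0 || >= num_labels" check, mirroring
  // the kUnsignedGreaterThanEqual branch to &fallthrough above.
  if (index >= num_labels) {
    Fallthrough();
    return;
  }
  table[index]();  // analogous to the computed jump into the branch table
}

int main() {
  Switch(1, 0);  // case 1
  Switch(7, 0);  // fallthrough
}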
+#undef __
+
+#define __ basm.
+
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
+ ASM_CODE_COMMENT(masm);
+ BaselineAssembler basm(masm);
+
+ Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
+ Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
+
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");
+
+ Label skip_interrupt_label;
+ __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
+ __ masm()->SmiTag(params_size);
+ __ masm()->Push(params_size, kInterpreterAccumulatorRegister);
+
+ __ LoadContext(kContextRegister);
+ __ LoadFunction(kJSFunctionRegister);
+ __ masm()->Push(kJSFunctionRegister);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+
+ __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
+ __ masm()->SmiUntag(params_size);
+ __ Bind(&skip_interrupt_label);
+ }
+
+ BaselineAssembler::ScratchRegisterScope temps(&basm);
+ Register actual_params_size = temps.AcquireScratch();
+  // Load the actual parameter count from the frame; the receiver is added
+  // below.
+ __ Move(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label corrected_args_count;
+ __ masm()->Branch(&corrected_args_count, ge, params_size,
+ Operand(actual_params_size));
+ __ masm()->Move(params_size, actual_params_size);
+ __ Bind(&corrected_args_count);
+
+ // Leave the frame (also dropping the register file).
+ __ masm()->LeaveFrame(StackFrame::BASELINE);
+
+ // Drop receiver + arguments.
+ __ masm()->Add_d(params_size, params_size, 1); // Include the receiver.
+ __ masm()->Alsl_d(sp, params_size, sp, kPointerSizeLog2);
+ __ masm()->Ret();
+}
+
+#undef __
+
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+ Operand(kInterpreterAccumulatorRegister));
+}
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_
diff --git a/deps/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h b/deps/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h
new file mode 100644
index 0000000000..9a68c7ebca
--- /dev/null
+++ b/deps/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h
@@ -0,0 +1,77 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
+#define V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
+
+#include "src/base/logging.h"
+#include "src/baseline/baseline-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ basm_.
+
+void BaselineCompiler::Prologue() {
+ ASM_CODE_COMMENT(&masm_);
+ __ masm()->EnterFrame(StackFrame::BASELINE);
+ DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+ int max_frame_size =
+ bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
+ CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+
+ PrologueFillFrame();
+}
+
+void BaselineCompiler::PrologueFillFrame() {
+ ASM_CODE_COMMENT(&masm_);
+ // Inlined register frame fill
+ interpreter::Register new_target_or_generator_register =
+ bytecode_->incoming_new_target_or_generator_register();
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ int register_count = bytecode_->register_count();
+ // Magic value
+ const int kLoopUnrollSize = 8;
+ const int new_target_index = new_target_or_generator_register.index();
+ const bool has_new_target = new_target_index != kMaxInt;
+ if (has_new_target) {
+ DCHECK_LE(new_target_index, register_count);
+ __ masm()->Add_d(sp, sp, Operand(-(kPointerSize * new_target_index)));
+ for (int i = 0; i < new_target_index; i++) {
+ __ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
+ }
+ // Push new_target_or_generator.
+ __ Push(kJavaScriptCallNewTargetRegister);
+ register_count -= new_target_index + 1;
+ }
+ if (register_count < 2 * kLoopUnrollSize) {
+ // If the frame is small enough, just unroll the frame fill completely.
+ __ masm()->Add_d(sp, sp, Operand(-(kPointerSize * register_count)));
+ for (int i = 0; i < register_count; ++i) {
+ __ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
+ }
+ } else {
+ __ masm()->Add_d(sp, sp, Operand(-(kPointerSize * register_count)));
+ for (int i = 0; i < register_count; ++i) {
+ __ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
+ }
+ }
+}
+
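PrologueFillFrame above reserves one stack slot per interpreter register and pre-fills each slot with undefined, splicing new_target in at its register index. The following simplified model of the resulting frame layout is not V8 code; the sentinel values are arbitrary.

// Toy model of the filled baseline frame; not V8 code.
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<uint64_t> FillFrame(int register_count, int new_target_index,
                                uint64_t undefined, uint64_t new_target) {
  std::vector<uint64_t> frame(register_count, undefined);
  if (new_target_index >= 0 && new_target_index < register_count) {
    frame[new_target_index] = new_target;  // "push new_target_or_generator"
  }
  return frame;
}

int main() {
  // Hypothetical sentinel values, just for illustration.
  const uint64_t kUndefined = 0xdead, kNewTarget = 0xbeef;
  std::vector<uint64_t> frame = FillFrame(5, 2, kUndefined, kNewTarget);
  for (uint64_t slot : frame) std::cout << std::hex << slot << " ";
  std::cout << "\n";  // dead dead beef dead dead
}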
+void BaselineCompiler::VerifyFrameSize() {
+ ASM_CODE_COMMENT(&masm_);
+ __ masm()->Add_d(t0, sp,
+ Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+ bytecode_->frame_size()));
+ __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, t0, Operand(fp));
+}
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
diff --git a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
index 31bc96861b..989d5c4ae5 100644
--- a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
+++ b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
@@ -506,6 +506,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+ Operand(kInterpreterAccumulatorRegister));
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
index d8220fa798..561e45249e 100644
--- a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
+++ b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
@@ -504,6 +504,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+ Operand(kInterpreterAccumulatorRegister));
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
index 01f5a5802b..663462fdb5 100644
--- a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
@@ -109,30 +109,19 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
}
void BaselineAssembler::CallBuiltin(Builtin builtin) {
- if (masm()->options().short_builtin_calls) {
- __ CallBuiltin(builtin);
- } else {
- ASM_CODE_COMMENT_STRING(masm_,
- __ CommentForOffHeapTrampoline("call", builtin));
- Register temp = t6;
- __ LoadEntryFromBuiltin(builtin, temp);
- __ Call(temp);
- }
+ ASM_CODE_COMMENT_STRING(masm_,
+ __ CommentForOffHeapTrampoline("call", builtin));
+ Register temp = t6;
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Call(temp);
}
void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
- if (masm()->options().short_builtin_calls) {
- // Generate pc-relative jump.
- __ TailCallBuiltin(builtin);
- } else {
- ASM_CODE_COMMENT_STRING(
- masm_, __ CommentForOffHeapTrampoline("tail call", builtin));
- // t6 be used for function call in RISCV64
- // For example 'jalr t6' or 'jal t6'
- Register temp = t6;
- __ LoadEntryFromBuiltin(builtin, temp);
- __ Jump(temp);
- }
+ ASM_CODE_COMMENT_STRING(masm_,
+ __ CommentForOffHeapTrampoline("tail call", builtin));
+ Register temp = t6;
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Jump(temp);
}
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
@@ -140,7 +129,7 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ And(tmp, value, Operand(mask));
- __ Branch(target, AsMasmCondition(cc), tmp, Operand(mask));
+ __ Branch(target, AsMasmCondition(cc), tmp, Operand(zero_reg));
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
@@ -161,6 +150,11 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(map);
+ __ GetObjectType(map, type, type);
+ __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
+ }
__ Ld(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
}
@@ -182,44 +176,28 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
- ScratchRegisterScope temps(this);
- Register temp = temps.AcquireScratch();
+  // TODO: Handle pointer compression.
__ AssertSmi(lhs);
__ AssertSmi(rhs);
- if (COMPRESS_POINTERS_BOOL) {
- __ Sub32(temp, lhs, rhs);
- } else {
- __ Sub64(temp, lhs, rhs);
- }
- __ Branch(target, AsMasmCondition(cc), temp, Operand(zero_reg));
+ __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
+  // TODO: Handle pointer compression.
ScratchRegisterScope temps(this);
- Register tmp1 = temps.AcquireScratch();
- Register tmp2 = temps.AcquireScratch();
- __ Ld(tmp1, operand);
- if (COMPRESS_POINTERS_BOOL) {
- __ Sub32(tmp2, value, tmp1);
- } else {
- __ Sub64(tmp2, value, tmp1);
- }
- __ Branch(target, AsMasmCondition(cc), tmp2, Operand(zero_reg));
+ Register scratch = temps.AcquireScratch();
+ __ Ld(scratch, operand);
+ __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance) {
+  // TODO: Handle pointer compression.
ScratchRegisterScope temps(this);
- Register tmp1 = temps.AcquireScratch();
- Register tmp2 = temps.AcquireScratch();
- __ Ld(tmp1, operand);
- if (COMPRESS_POINTERS_BOOL) {
- __ Sub32(tmp2, tmp1, value);
- } else {
- __ Sub64(tmp2, tmp1, value);
- }
- __ Branch(target, AsMasmCondition(cc), tmp2, Operand(zero_reg));
+ Register scratch = temps.AcquireScratch();
+ __ Ld(scratch, operand);
+ __ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
@@ -268,136 +246,50 @@ inline Register ToRegister(BaselineAssembler* basm,
}
template <typename... Args>
-struct CountPushHelper;
-template <>
-struct CountPushHelper<> {
- static int Count() { return 0; }
-};
-template <typename Arg, typename... Args>
-struct CountPushHelper<Arg, Args...> {
- static int Count(Arg arg, Args... args) {
- return 1 + CountPushHelper<Args...>::Count(args...);
- }
-};
-template <typename... Args>
-struct CountPushHelper<interpreter::RegisterList, Args...> {
- static int Count(interpreter::RegisterList list, Args... args) {
- return list.register_count() + CountPushHelper<Args...>::Count(args...);
- }
-};
-
-template <typename... Args>
struct PushAllHelper;
-template <typename... Args>
-void PushAll(BaselineAssembler* basm, Args... args) {
- PushAllHelper<Args...>::Push(basm, args...);
-}
-template <typename... Args>
-void PushAllReverse(BaselineAssembler* basm, Args... args) {
- PushAllHelper<Args...>::PushReverse(basm, args...);
-}
-
template <>
struct PushAllHelper<> {
- static void Push(BaselineAssembler* basm) {}
- static void PushReverse(BaselineAssembler* basm) {}
+ static int Push(BaselineAssembler* basm) { return 0; }
+ static int PushReverse(BaselineAssembler* basm) { return 0; }
};
-
-inline void PushSingle(MacroAssembler* masm, RootIndex source) {
- masm->PushRoot(source);
-}
-inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
-
-inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
-inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
- masm->Push(object);
-}
-inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
- masm->li(kScratchReg, (int64_t)(immediate));
- PushSingle(masm, kScratchReg);
-}
-
-inline void PushSingle(MacroAssembler* masm, TaggedIndex value) {
- masm->li(kScratchReg, static_cast<int64_t>(value.ptr()));
- PushSingle(masm, kScratchReg);
-}
-inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
- masm->Ld(kScratchReg, operand);
- PushSingle(masm, kScratchReg);
-}
-inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
- return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
-}
-
template <typename Arg>
struct PushAllHelper<Arg> {
- static void Push(BaselineAssembler* basm, Arg arg) {
- PushSingle(basm->masm(), arg);
+ static int Push(BaselineAssembler* basm, Arg arg) {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg));
+ return 1;
}
- static void PushReverse(BaselineAssembler* basm, Arg arg) {
- // Push the padding register to round up the amount of values pushed.
+ static int PushReverse(BaselineAssembler* basm, Arg arg) {
return Push(basm, arg);
}
};
-template <typename Arg1, typename Arg2, typename... Args>
-struct PushAllHelper<Arg1, Arg2, Args...> {
- static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
- Args... args) {
- {
- BaselineAssembler::ScratchRegisterScope scope(basm);
- basm->masm()->Push(ToRegister(basm, &scope, arg1),
- ToRegister(basm, &scope, arg2));
- }
- PushAll(basm, args...);
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+ static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
+ PushAllHelper<Arg>::Push(basm, arg);
+ return 1 + PushAllHelper<Args...>::Push(basm, args...);
}
- static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
- Args... args) {
- PushAllReverse(basm, args...);
- {
- BaselineAssembler::ScratchRegisterScope scope(basm);
- basm->masm()->Push(ToRegister(basm, &scope, arg2),
- ToRegister(basm, &scope, arg1));
- }
- }
-};
-// Currently RegisterLists are always be the last argument, so we don't
-// specialize for the case where they're not. We do still specialise for the
-// aligned and unaligned cases.
-template <typename Arg>
-struct PushAllHelper<Arg, interpreter::RegisterList> {
- static void Push(BaselineAssembler* basm, Arg arg,
- interpreter::RegisterList list) {
- DCHECK_EQ(list.register_count() % 2, 1);
- PushAll(basm, arg, list[0], list.PopLeft());
- }
- static void PushReverse(BaselineAssembler* basm, Arg arg,
- interpreter::RegisterList list) {
- if (list.register_count() == 0) {
- PushAllReverse(basm, arg);
- } else {
- PushAllReverse(basm, arg, list[0], list.PopLeft());
- }
+ static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
+ int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
+ PushAllHelper<Arg>::Push(basm, arg);
+ return nargs + 1;
}
};
template <>
struct PushAllHelper<interpreter::RegisterList> {
- static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
- DCHECK_EQ(list.register_count() % 2, 0);
- for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) {
- PushAll(basm, list[reg_index], list[reg_index + 1]);
+ static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+ for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+ PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
}
+ return list.register_count();
}
- static void PushReverse(BaselineAssembler* basm,
- interpreter::RegisterList list) {
- int reg_index = list.register_count() - 1;
- if (reg_index % 2 == 0) {
- // Push the padding register to round up the amount of values pushed.
- PushAllReverse(basm, list[reg_index]);
- reg_index--;
- }
- for (; reg_index >= 1; reg_index -= 2) {
- PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
+ static int PushReverse(BaselineAssembler* basm,
+ interpreter::RegisterList list) {
+ for (int reg_index = list.register_count() - 1; reg_index >= 0;
+ --reg_index) {
+ PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
}
+ return list.register_count();
}
};
@@ -414,10 +306,9 @@ struct PopAllHelper<Register> {
}
};
template <typename... T>
-struct PopAllHelper<Register, Register, T...> {
- static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
- T... tail) {
- basm->masm()->Pop(reg1, reg2);
+struct PopAllHelper<Register, T...> {
+ static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
+ PopAllHelper<Register>::Pop(basm, reg);
PopAllHelper<T...>::Pop(basm, tail...);
}
};
@@ -426,20 +317,12 @@ struct PopAllHelper<Register, Register, T...> {
template <typename... T>
int BaselineAssembler::Push(T... vals) {
- // We have to count the pushes first, to decide whether to add padding before
- // the first push.
- int push_count = detail::CountPushHelper<T...>::Count(vals...);
- if (push_count % 2 == 0) {
- detail::PushAll(this, vals...);
- } else {
- detail::PushAll(this, vals...);
- }
- return push_count;
+ return detail::PushAllHelper<T...>::Push(this, vals...);
}
template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
- detail::PushAllReverse(this, vals...);
+ detail::PushAllHelper<T...>::PushReverse(this, vals...);
}
template <typename... T>
@@ -461,7 +344,7 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
}
void BaselineAssembler::LoadByteField(Register output, Register source,
int offset) {
- __ Ld(output, FieldMemOperand(source, offset));
+ __ Lb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
@@ -495,11 +378,11 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
- __ Ld(interrupt_budget,
+ __ Lw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
// Remember to set flags as part of the add!
- __ Add64(interrupt_budget, interrupt_budget, weight);
- __ Sd(interrupt_budget,
+ __ Add32(interrupt_budget, interrupt_budget, weight);
+ __ Sw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
if (skip_interrupt_label) {
DCHECK_LT(weight, 0);
@@ -517,11 +400,11 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
- __ Ld(interrupt_budget,
+ __ Lw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
// Remember to set flags as part of the add!
- __ Add64(interrupt_budget, interrupt_budget, weight);
- __ Sd(interrupt_budget,
+ __ Add32(interrupt_budget, interrupt_budget, weight);
+ __ Sw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
if (skip_interrupt_label)
__ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(weight));
@@ -546,7 +429,6 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
// Mostly copied from code-generator-riscv64.cc
ScratchRegisterScope scope(this);
- Register temp = scope.AcquireScratch();
Label table;
__ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
reg, Operand(int64_t(num_labels)));
@@ -555,21 +437,20 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
DCHECK(is_int32(imm64));
int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
- __ auipc(temp, Hi20); // Read PC + Hi20 into t6
- __ lui(temp, Lo12); // jump PC + Hi20 + Lo12
+ __ auipc(t6, Hi20); // Read PC + Hi20 into t6
+ __ addi(t6, t6, Lo12); // jump PC + Hi20 + Lo12
- int entry_size_log2 = 2;
- Register temp2 = scope.AcquireScratch();
- __ CalcScaledAddress(temp2, temp, reg, entry_size_log2);
- __ Jump(temp);
+ int entry_size_log2 = 3;
+ __ CalcScaledAddress(t6, t6, reg, entry_size_log2);
+ __ Jump(t6);
{
TurboAssembler::BlockTrampolinePoolScope(masm());
- __ BlockTrampolinePoolFor(num_labels * kInstrSize);
+ __ BlockTrampolinePoolFor(num_labels * kInstrSize * 2);
__ bind(&table);
for (int i = 0; i < num_labels; ++i) {
- __ Branch(labels[i]);
+ __ BranchLong(labels[i]);
}
- DCHECK_EQ(num_labels * kInstrSize, __ InstructionsGeneratedSince(&table));
+ DCHECK_EQ(num_labels * 2, __ InstructionsGeneratedSince(&table));
__ bind(&fallthrough);
}
}
@@ -598,7 +479,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->Push(kJSFunctionRegister);
__ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
- __ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
+ __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
__ masm()->SmiUntag(params_size);
__ Bind(&skip_interrupt_label);
@@ -630,6 +511,11 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+ Operand(kInterpreterAccumulatorRegister));
+}
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
index fc73105b8e..1fbdaa0761 100644
--- a/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
@@ -37,69 +37,35 @@ void BaselineCompiler::PrologueFillFrame() {
const int kLoopUnrollSize = 8;
const int new_target_index = new_target_or_generator_register.index();
const bool has_new_target = new_target_index != kMaxInt;
- // BaselineOutOfLinePrologue already pushed one undefined.
- register_count -= 1;
if (has_new_target) {
- if (new_target_index == 0) {
- // Oops, need to fix up that undefined that BaselineOutOfLinePrologue
- // pushed.
- __ masm()->Sd(kJavaScriptCallNewTargetRegister, MemOperand(sp));
- } else {
- DCHECK_LE(new_target_index, register_count);
- int index = 1;
- for (; index + 2 <= new_target_index; index += 2) {
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kInterpreterAccumulatorRegister);
- }
- if (index == new_target_index) {
- __ masm()->Push(kJavaScriptCallNewTargetRegister,
- kInterpreterAccumulatorRegister);
- } else {
- DCHECK_EQ(index, new_target_index - 1);
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kJavaScriptCallNewTargetRegister);
- }
- // We pushed "index" registers, minus the one the prologue pushed, plus
- // the two registers that included new_target.
- register_count -= (index - 1 + 2);
+ DCHECK_LE(new_target_index, register_count);
+ __ masm()->Add64(sp, sp, Operand(-(kPointerSize * new_target_index)));
+ for (int i = 0; i < new_target_index; i++) {
+ __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
+ // Push new_target_or_generator.
+ __ Push(kJavaScriptCallNewTargetRegister);
+ register_count -= new_target_index + 1;
}
if (register_count < 2 * kLoopUnrollSize) {
// If the frame is small enough, just unroll the frame fill completely.
- for (int i = 0; i < register_count; i += 2) {
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kInterpreterAccumulatorRegister);
+ __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+ for (int i = 0; i < register_count; ++i) {
+ __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
} else {
- BaselineAssembler::ScratchRegisterScope temps(&basm_);
- Register scratch = temps.AcquireScratch();
-
- // Extract the first few registers to round to the unroll size.
- int first_registers = register_count % kLoopUnrollSize;
- for (int i = 0; i < first_registers; i += 2) {
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kInterpreterAccumulatorRegister);
- }
- __ Move(scratch, register_count / kLoopUnrollSize);
- // We enter the loop unconditionally, so make sure we need to loop at least
- // once.
- DCHECK_GT(register_count / kLoopUnrollSize, 0);
- Label loop;
- __ Bind(&loop);
- for (int i = 0; i < kLoopUnrollSize; i += 2) {
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kInterpreterAccumulatorRegister);
+ __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+ for (int i = 0; i < register_count; ++i) {
+ __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
- __ masm()->Branch(&loop, gt, scratch, Operand(1));
}
}
void BaselineCompiler::VerifyFrameSize() {
ASM_CODE_COMMENT(&masm_);
__ masm()->Add64(kScratchReg, sp,
- RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp +
- bytecode_->frame_size(),
- 2 * kSystemPointerSize));
+ Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+ bytecode_->frame_size()));
__ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
Operand(fp));
}
diff --git a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
index f18ac84eae..aa9564dcea 100644
--- a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
+++ b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
@@ -468,16 +468,21 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- Register return_pc = scratch;
- __ masm()->PopReturnAddressTo(return_pc);
- __ masm()->leaq(rsp, MemOperand(rsp, params_size, times_system_pointer_size,
- kSystemPointerSize));
- __ masm()->PushReturnAddressFrom(return_pc);
+ __ masm()->DropArguments(
+ params_size, scratch, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ masm()->Ret();
}
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->cmp_tagged(reg, kInterpreterAccumulatorRegister);
+ assembler_->masm()->Assert(equal, AbortReason::kUnexpectedValue);
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/bigint/bigint-internal.h b/deps/v8/src/bigint/bigint-internal.h
index 4c214153bf..e1e8cf77a0 100644
--- a/deps/v8/src/bigint/bigint-internal.h
+++ b/deps/v8/src/bigint/bigint-internal.h
@@ -22,6 +22,7 @@ constexpr int kNewtonInversionThreshold = 50;
// kBarrettThreshold is defined in bigint.h.
constexpr int kToStringFastThreshold = 43;
+constexpr int kFromStringLargeThreshold = 300;
class ProcessorImpl : public Processor {
public:
@@ -69,6 +70,8 @@ class ProcessorImpl : public Processor {
void FromString(RWDigits Z, FromStringAccumulator* accumulator);
void FromStringClassic(RWDigits Z, FromStringAccumulator* accumulator);
+ void FromStringLarge(RWDigits Z, FromStringAccumulator* accumulator);
+ void FromStringBasePowerOfTwo(RWDigits Z, FromStringAccumulator* accumulator);
bool should_terminate() { return status_ == Status::kInterrupted; }
diff --git a/deps/v8/src/bigint/bigint.h b/deps/v8/src/bigint/bigint.h
index 218bf4616c..47159d0bf4 100644
--- a/deps/v8/src/bigint/bigint.h
+++ b/deps/v8/src/bigint/bigint.h
@@ -262,6 +262,8 @@ class Processor {
// upon return will be set to the actual length of the result string.
Status ToString(char* out, int* out_length, Digits X, int radix, bool sign);
+ // Z := the contents of {accumulator}.
+  // Assume that this leaves {accumulator} in an unusable state.
Status FromString(RWDigits Z, FromStringAccumulator* accumulator);
};
@@ -336,7 +338,7 @@ class FromStringAccumulator {
// So for sufficiently large N, setting max_digits=N here will not actually
// allow parsing BigInts with N digits. We can fix that if/when anyone cares.
explicit FromStringAccumulator(int max_digits)
- : max_digits_(std::max(max_digits - kStackParts, kStackParts)) {}
+ : max_digits_(std::max(max_digits, kStackParts)) {}
// Step 2: Call this method to read all characters.
// {Char} should be a character type, such as uint8_t or uint16_t.
@@ -348,7 +350,7 @@ class FromStringAccumulator {
digit_t radix);
// Step 3: Check if a result is available, and determine its required
- // allocation size.
+ // allocation size (guaranteed to be <= max_digits passed to the constructor).
Result result() { return result_; }
int ResultLength() {
return std::max(stack_parts_used_, static_cast<int>(heap_parts_.size()));
@@ -360,8 +362,12 @@ class FromStringAccumulator {
private:
friend class ProcessorImpl;
- ALWAYS_INLINE bool AddPart(digit_t multiplier, digit_t part,
- bool is_last = false);
+ template <class Char>
+ ALWAYS_INLINE const Char* ParsePowerTwo(const Char* start, const Char* end,
+ digit_t radix);
+
+ ALWAYS_INLINE bool AddPart(digit_t multiplier, digit_t part, bool is_last);
+ ALWAYS_INLINE bool AddPart(digit_t part);
digit_t stack_parts_[kStackParts];
std::vector<digit_t> heap_parts_;
@@ -371,6 +377,7 @@ class FromStringAccumulator {
Result result_{Result::kOk};
int stack_parts_used_{0};
bool inline_everything_{false};
+ uint8_t radix_{0};
};
// The rest of this file is the inlineable implementation of
@@ -403,6 +410,47 @@ static constexpr uint8_t kCharValue[] = {
25, 26, 27, 28, 29, 30, 31, 32, // 112..119
33, 34, 35, 255, 255, 255, 255, 255, // 120..127 'z' == 122
};
+
+// A space- and time-efficient way to map {2,4,8,16,32} to {1,2,3,4,5}.
+static constexpr uint8_t kCharBits[] = {1, 2, 3, 0, 4, 0, 0, 0, 5};
+
+template <class Char>
+const Char* FromStringAccumulator::ParsePowerTwo(const Char* current,
+ const Char* end,
+ digit_t radix) {
+ radix_ = static_cast<uint8_t>(radix);
+ const int char_bits = kCharBits[radix >> 2];
+ int bits_left;
+ bool done = false;
+ do {
+ digit_t part = 0;
+ bits_left = kDigitBits;
+ while (true) {
+ digit_t d; // Numeric value of the current character {c}.
+ uint32_t c = *current;
+ if (c > 127 || (d = bigint::kCharValue[c]) >= radix) {
+ done = true;
+ break;
+ }
+
+ if (bits_left < char_bits) break;
+ bits_left -= char_bits;
+ part = (part << char_bits) | d;
+
+ ++current;
+ if (current == end) {
+ done = true;
+ break;
+ }
+ }
+ if (!AddPart(part)) return current;
+ } while (!done);
+ // We use the unused {last_multiplier_} field to
+ // communicate how many bits are unused in the last part.
+ last_multiplier_ = bits_left;
+ return current;
+}
+
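ParsePowerTwo above exploits the fact that for a power-of-two radix every character contributes exactly char_bits bits, so parts can be assembled with shifts alone; the only extra bookkeeping is the number of unused bits in the last part. The following self-contained sketch of that idea is not V8 code and is simplified to ASCII hex-style digits with a direct log2 of the radix.

// Simplified sketch of power-of-two parsing; not V8 code.
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct PowerTwoParts {
  std::vector<uint64_t> parts;   // most significant characters first
  int unused_bits_in_last_part;  // plays the role of last_multiplier_
};

PowerTwoParts ParsePowerTwo(const std::string& digits, int radix) {
  constexpr int kDigitBits = 64;
  int char_bits = 0;
  while ((1 << (char_bits + 1)) <= radix) ++char_bits;  // log2(radix)
  PowerTwoParts result{{}, 0};
  uint64_t part = 0;
  int bits_left = kDigitBits;
  for (char c : digits) {
    uint64_t d = (c <= '9') ? c - '0' : (c | 0x20) - 'a' + 10;
    if (bits_left < char_bits) {  // this part is as full as it can get
      result.parts.push_back(part);
      part = 0;
      bits_left = kDigitBits;
    }
    part = (part << char_bits) | d;
    bits_left -= char_bits;
  }
  result.parts.push_back(part);
  result.unused_bits_in_last_part = bits_left;
  return result;
}

int main() {
  PowerTwoParts p = ParsePowerTwo("ffffffffffffffff1", 16);  // 17 hex digits
  std::cout << p.parts.size() << " parts, " << p.unused_bits_in_last_part
            << " unused bits in the last part\n";
  // Expected: 2 parts, 60 unused bits in the last part.
}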
template <class Char>
const Char* FromStringAccumulator::Parse(const Char* start, const Char* end,
digit_t radix) {
@@ -417,12 +465,15 @@ const Char* FromStringAccumulator::Parse(const Char* start, const Char* end,
static constexpr int kInlineThreshold = kStackParts * kDigitBits * 100 / 517;
inline_everything_ = (end - start) <= kInlineThreshold;
#endif
+ if (!inline_everything_ && (radix & (radix - 1)) == 0) {
+ return ParsePowerTwo(start, end, radix);
+ }
bool done = false;
do {
digit_t multiplier = 1;
digit_t part = 0;
while (true) {
- digit_t d;
+ digit_t d; // Numeric value of the current character {c}.
uint32_t c = *current;
if (c > 127 || (d = bigint::kCharValue[c]) >= radix) {
done = true;
@@ -478,6 +529,10 @@ bool FromStringAccumulator::AddPart(digit_t multiplier, digit_t part,
BIGINT_H_DCHECK(max_multiplier_ == 0 || max_multiplier_ == multiplier);
max_multiplier_ = multiplier;
}
+ return AddPart(part);
+}
+
+bool FromStringAccumulator::AddPart(digit_t part) {
if (stack_parts_used_ < kStackParts) {
stack_parts_[stack_parts_used_++] = part;
return true;
@@ -489,7 +544,7 @@ bool FromStringAccumulator::AddPart(digit_t multiplier, digit_t part,
heap_parts_.push_back(stack_parts_[i]);
}
}
- if (static_cast<int>(heap_parts_.size()) >= max_digits_ && !is_last) {
+ if (static_cast<int>(heap_parts_.size()) >= max_digits_) {
result_ = Result::kMaxSizeExceeded;
return false;
}
diff --git a/deps/v8/src/bigint/fromstring.cc b/deps/v8/src/bigint/fromstring.cc
index 0307745cad..a4b34a1a02 100644
--- a/deps/v8/src/bigint/fromstring.cc
+++ b/deps/v8/src/bigint/fromstring.cc
@@ -40,7 +40,6 @@ void ProcessorImpl::FromStringClassic(RWDigits Z,
// Parts are stored on the heap.
for (int i = 1; i < num_heap_parts - 1; i++) {
MultiplySingle(Z, already_set, max_multiplier);
- if (should_terminate()) return;
Add(Z, accumulator->heap_parts_[i]);
already_set.set_len(already_set.len() + 1);
}
@@ -48,6 +47,262 @@ void ProcessorImpl::FromStringClassic(RWDigits Z,
Add(Z, accumulator->heap_parts_.back());
}
+// The fast algorithm: combine parts in a balanced-binary-tree like order:
+// Multiply-and-add neighboring pairs of parts, then loop, until only one
+// part is left. The benefit is that the multiplications will have inputs of
+// similar sizes, which makes them amenable to fast multiplication algorithms.
+// We have to do more multiplications than the classic algorithm though,
+// because we also have to multiply the multipliers.
+// Optimizations:
+// - We can skip the multiplier for the first part, because we never need it.
+// - Most multipliers are the same; we can avoid repeated multiplications and
+// just copy the previous result. (In theory we could even de-dupe them, but
+// as the parts/multipliers grow, we'll need most of the memory anyway.)
+// Copied results are marked with a * below.
+// - We can re-use memory using a system of three buffers whose usage rotates:
+// - one is considered empty, and is overwritten with the new parts,
+// - one holds the multipliers (and will be "empty" in the next round), and
+// - one initially holds the parts and is overwritten with the new multipliers
+// Parts and multipliers both grow in each iteration, and get fewer, so we
+// use the space of two adjacent old chunks for one new chunk.
+// Since the {heap_parts_} vector has the right size, and so does the
+// result {Z}, we can use that memory, and only need to allocate one scratch
+// vector. If the final result ends up in the wrong bucket, we have to copy it
+// to the correct one.
+// - We don't have to keep track of the positions and sizes of the chunks,
+// because we can deduce their precise placement from the iteration index.
+//
+// Example, assuming digit_t is 4 bits, fitting one decimal digit:
+// Initial state:
+// parts_: 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// multipliers_: 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
+// After the first iteration of the outer loop:
+// parts: 12 34 56 78 90 12 34 5
+// multipliers: 100 *100 *100 *100 *100 *100 10
+// After the second iteration:
+// parts: 1234 5678 9012 345
+// multipliers: 10000 *10000 1000
+// After the third iteration:
+// parts: 12345678 9012345
+// multipliers: 10000000
+// And then there's an obvious last iteration.
+void ProcessorImpl::FromStringLarge(RWDigits Z,
+ FromStringAccumulator* accumulator) {
+ int num_parts = static_cast<int>(accumulator->heap_parts_.size());
+ DCHECK(num_parts >= 2); // NOLINT(readability/check)
+ DCHECK(Z.len() >= num_parts);
+ RWDigits parts(accumulator->heap_parts_.data(), num_parts);
+ Storage multipliers_storage(num_parts);
+ RWDigits multipliers(multipliers_storage.get(), num_parts);
+ RWDigits temp(Z, 0, num_parts);
+ // Unrolled and specialized first iteration: part_len == 1, so instead of
+ // Digits sub-vectors we have individual digit_t values, and the multipliers
+ // are known up front.
+ {
+ digit_t max_multiplier = accumulator->max_multiplier_;
+ digit_t last_multiplier = accumulator->last_multiplier_;
+ RWDigits new_parts = temp;
+ RWDigits new_multipliers = parts;
+ int i = 0;
+ for (; i + 1 < num_parts; i += 2) {
+ digit_t p_in = parts[i];
+ digit_t p_in2 = parts[i + 1];
+ digit_t m_in = max_multiplier;
+ digit_t m_in2 = i == num_parts - 2 ? last_multiplier : max_multiplier;
+ // p[j] = p[i] * m[i+1] + p[i+1]
+ digit_t p_high;
+ digit_t p_low = digit_mul(p_in, m_in2, &p_high);
+ digit_t carry;
+ new_parts[i] = digit_add2(p_low, p_in2, &carry);
+ new_parts[i + 1] = p_high + carry;
+ // m[j] = m[i] * m[i+1]
+ if (i > 0) {
+ if (i > 2 && m_in2 != last_multiplier) {
+ new_multipliers[i] = new_multipliers[i - 2];
+ new_multipliers[i + 1] = new_multipliers[i - 1];
+ } else {
+ digit_t m_high;
+ new_multipliers[i] = digit_mul(m_in, m_in2, &m_high);
+ new_multipliers[i + 1] = m_high;
+ }
+ }
+ }
+ // Trailing last part (if {num_parts} was odd).
+ if (i < num_parts) {
+ new_parts[i] = parts[i];
+ new_multipliers[i] = last_multiplier;
+ i += 2;
+ }
+ num_parts = i >> 1;
+ RWDigits new_temp = multipliers;
+ parts = new_parts;
+ multipliers = new_multipliers;
+ temp = new_temp;
+ AddWorkEstimate(num_parts);
+ }
+ int part_len = 2;
+
+ // Remaining iterations.
+ while (num_parts > 1) {
+ RWDigits new_parts = temp;
+ RWDigits new_multipliers = parts;
+ int new_part_len = part_len * 2;
+ int i = 0;
+ for (; i + 1 < num_parts; i += 2) {
+ int start = i * part_len;
+ Digits p_in(parts, start, part_len);
+ Digits p_in2(parts, start + part_len, part_len);
+ Digits m_in(multipliers, start, part_len);
+ Digits m_in2(multipliers, start + part_len, part_len);
+ RWDigits p_out(new_parts, start, new_part_len);
+ RWDigits m_out(new_multipliers, start, new_part_len);
+ // p[j] = p[i] * m[i+1] + p[i+1]
+ Multiply(p_out, p_in, m_in2);
+ if (should_terminate()) return;
+ digit_t overflow = AddAndReturnOverflow(p_out, p_in2);
+ DCHECK(overflow == 0); // NOLINT(readability/check)
+ USE(overflow);
+ // m[j] = m[i] * m[i+1]
+ if (i > 0) {
+ bool copied = false;
+ if (i > 2) {
+ int prev_start = (i - 2) * part_len;
+ Digits m_in_prev(multipliers, prev_start, part_len);
+ Digits m_in2_prev(multipliers, prev_start + part_len, part_len);
+ if (Compare(m_in, m_in_prev) == 0 &&
+ Compare(m_in2, m_in2_prev) == 0) {
+ copied = true;
+ Digits m_out_prev(new_multipliers, prev_start, new_part_len);
+ for (int k = 0; k < new_part_len; k++) m_out[k] = m_out_prev[k];
+ }
+ }
+ if (!copied) {
+ Multiply(m_out, m_in, m_in2);
+ if (should_terminate()) return;
+ }
+ }
+ }
+ // Trailing last part (if {num_parts} was odd).
+ if (i < num_parts) {
+ Digits p_in(parts, i * part_len, part_len);
+ Digits m_in(multipliers, i * part_len, part_len);
+ RWDigits p_out(new_parts, i * part_len, new_part_len);
+ RWDigits m_out(new_multipliers, i * part_len, new_part_len);
+ int k = 0;
+ for (; k < p_in.len(); k++) p_out[k] = p_in[k];
+ for (; k < p_out.len(); k++) p_out[k] = 0;
+ k = 0;
+ for (; k < m_in.len(); k++) m_out[k] = m_in[k];
+ for (; k < m_out.len(); k++) m_out[k] = 0;
+ i += 2;
+ }
+ num_parts = i >> 1;
+ part_len = new_part_len;
+ RWDigits new_temp = multipliers;
+ parts = new_parts;
+ multipliers = new_multipliers;
+ temp = new_temp;
+ }
+ // Copy the result to Z, if it doesn't happen to be there already.
+ if (parts.digits() != Z.digits()) {
+ int i = 0;
+ for (; i < parts.len(); i++) Z[i] = parts[i];
+ // Z might be bigger than we requested; be robust towards that.
+ for (; i < Z.len(); i++) Z[i] = 0;
+ }
+}
+
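The sketch below is a toy model of the balanced combination order described in the comment above, using uint64_t in place of arbitrary-precision digits. It is not V8 code: unlike FromStringLarge it recomputes every multiplier instead of copying repeated ones, and the inputs are kept small enough that uint64_t does not overflow.

// Toy model of balanced pairwise combination; not V8 code.
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

uint64_t CombineBalanced(std::vector<uint64_t> parts,
                         std::vector<uint64_t> multipliers) {
  // multipliers[i] == 10^(number of decimal characters stored in parts[i]).
  while (parts.size() > 1) {
    std::vector<uint64_t> new_parts, new_multipliers;
    for (size_t i = 0; i + 1 < parts.size(); i += 2) {
      // p[j] = p[i] * m[i+1] + p[i+1];  m[j] = m[i] * m[i+1]
      new_parts.push_back(parts[i] * multipliers[i + 1] + parts[i + 1]);
      new_multipliers.push_back(multipliers[i] * multipliers[i + 1]);
    }
    if (parts.size() % 2 == 1) {  // an odd trailing part is carried over as-is
      new_parts.push_back(parts.back());
      new_multipliers.push_back(multipliers.back());
    }
    parts = std::move(new_parts);
    multipliers = std::move(new_multipliers);
  }
  return parts[0];
}

int main() {
  // "1234567890123" parsed into 4-character parts, most significant first.
  std::vector<uint64_t> parts = {1234, 5678, 9012, 3};
  std::vector<uint64_t> multipliers = {10000, 10000, 10000, 10};
  std::cout << CombineBalanced(parts, multipliers) << "\n";  // 1234567890123
}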
+// Specialized algorithms for power-of-two radixes. Designed to work with
+// {ParsePowerTwo}: {max_multiplier_} isn't saved, but {radix_} is, and
+// {last_multiplier_} has special meaning, namely the number of unpopulated bits
+// in the last part.
+// For these radixes, {parts} is already a list of correct bit sequences; we
+// just have to put them together in the right way:
+// - The parts are currently in reversed order. The highest-index parts[i]
+// will go into Z[0].
+// - All parts, possibly except for the last, are maximally populated.
+// - A maximally populated part stores a non-fractional number of characters,
+// i.e. the largest fitting multiple of {char_bits} of it is populated.
+// - The populated bits in a part are at the low end.
+// - The number of unused bits in the last part is stored in
+// {accumulator->last_multiplier_}.
+//
+// Example: Given the following parts vector, where letters are used to
+// label bits, bit order is big endian (i.e. [00000101] encodes "5"),
+// 'x' means "unpopulated", kDigitBits == 8, radix == 8, and char_bits == 3:
+//
+// parts[0] -> [xxABCDEF][xxGHIJKL][xxMNOPQR][xxxxxSTU] <- parts[3]
+//
+// We have to assemble the following result:
+//
+// Z[0] -> [NOPQRSTU][FGHIJKLM][xxxABCDE] <- Z[2]
+//
+void ProcessorImpl::FromStringBasePowerOfTwo(
+ RWDigits Z, FromStringAccumulator* accumulator) {
+ const int num_parts = accumulator->ResultLength();
+ DCHECK(num_parts >= 1); // NOLINT(readability/check)
+ DCHECK(Z.len() >= num_parts);
+ Digits parts(accumulator->heap_parts_.size() > 0
+ ? accumulator->heap_parts_.data()
+ : accumulator->stack_parts_,
+ num_parts);
+ uint8_t radix = accumulator->radix_;
+ DCHECK(radix == 2 || radix == 4 || radix == 8 || radix == 16 || radix == 32);
+ const int char_bits = BitLength(radix - 1);
+ const int unused_last_part_bits =
+ static_cast<int>(accumulator->last_multiplier_);
+ const int unused_part_bits = kDigitBits % char_bits;
+ const int max_part_bits = kDigitBits - unused_part_bits;
+ int z_index = 0;
+ int part_index = num_parts - 1;
+
+ // If the last part is fully populated, then all parts must be, and we can
+ // simply copy them (in reversed order).
+ if (unused_last_part_bits == 0) {
+ DCHECK(kDigitBits % char_bits == 0); // NOLINT(readability/check)
+ while (part_index >= 0) {
+ Z[z_index++] = parts[part_index--];
+ }
+ for (; z_index < Z.len(); z_index++) Z[z_index] = 0;
+ return;
+ }
+
+ // Otherwise we have to shift parts contents around as needed.
+ // Holds the next Z digit that we want to store...
+ digit_t digit = parts[part_index--];
+ // ...and the number of bits (at the right end) we already know.
+ int digit_bits = kDigitBits - unused_last_part_bits;
+ while (part_index >= 0) {
+ // Holds the last part that we read from {parts}...
+ digit_t part;
+ // ...and the number of bits (at the right end) that we haven't used yet.
+ int part_bits;
+ while (digit_bits < kDigitBits) {
+ part = parts[part_index--];
+ part_bits = max_part_bits;
+ digit |= part << digit_bits;
+ int part_shift = kDigitBits - digit_bits;
+ if (part_shift > part_bits) {
+ digit_bits += part_bits;
+ part = 0;
+ part_bits = 0;
+ if (part_index < 0) break;
+ } else {
+ digit_bits = kDigitBits;
+ part >>= part_shift;
+ part_bits -= part_shift;
+ }
+ }
+ Z[z_index++] = digit;
+ digit = part;
+ digit_bits = part_bits;
+ }
+ if (digit_bits > 0) {
+ Z[z_index++] = digit;
+ }
+ for (; z_index < Z.len(); z_index++) Z[z_index] = 0;
+}
+
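The following is a scaled-down illustration of the bit re-packing FromStringBasePowerOfTwo performs, using 8-bit output digits and radix 8 so the whole value fits in a uint64_t. It is illustrative only; the real code streams bits part by part instead of concatenating them into one integer.

// Scaled-down model of the bit re-packing; not V8 code.
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<uint8_t> RepackBase8(const std::vector<uint8_t>& parts,
                                 int bits_in_last_part) {
  constexpr int kDigitBits = 8;
  constexpr int kCharBits = 3;  // radix 8
  const int bits_per_full_part = kDigitBits - kDigitBits % kCharBits;  // 6
  // Concatenate all populated bits into one integer (most significant first).
  uint64_t value = 0;
  int total_bits = 0;
  for (size_t i = 0; i < parts.size(); ++i) {
    int bits = (i + 1 == parts.size()) ? bits_in_last_part : bits_per_full_part;
    value = (value << bits) | parts[i];
    total_bits += bits;
  }
  // Slice into fully packed 8-bit digits, least significant digit first.
  std::vector<uint8_t> digits;
  for (int consumed = 0; consumed < total_bits; consumed += kDigitBits) {
    digits.push_back(static_cast<uint8_t>(value & 0xff));
    value >>= kDigitBits;
  }
  return digits;
}

int main() {
  // Octal "1234567": characters contribute 3 bits each, two per 8-bit part.
  std::vector<uint8_t> parts = {0b001010, 0b011100, 0b101110, 0b111};
  std::vector<uint8_t> digits = RepackBase8(parts, /*bits_in_last_part=*/3);
  for (uint8_t d : digits) std::cout << static_cast<int>(d) << " ";
  std::cout << "\n";  // digits of 0o1234567 == 342391: 119 57 5
}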
void ProcessorImpl::FromString(RWDigits Z, FromStringAccumulator* accumulator) {
if (accumulator->inline_everything_) {
int i = 0;
@@ -57,8 +312,12 @@ void ProcessorImpl::FromString(RWDigits Z, FromStringAccumulator* accumulator) {
for (; i < Z.len(); i++) Z[i] = 0;
} else if (accumulator->stack_parts_used_ == 0) {
for (int i = 0; i < Z.len(); i++) Z[i] = 0;
- } else {
+ } else if (IsPowerOfTwo(accumulator->radix_)) {
+ FromStringBasePowerOfTwo(Z, accumulator);
+ } else if (accumulator->ResultLength() < kFromStringLargeThreshold) {
FromStringClassic(Z, accumulator);
+ } else {
+ FromStringLarge(Z, accumulator);
}
}
diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc
index 8d26259204..0d994d2d03 100644
--- a/deps/v8/src/builtins/accessors.cc
+++ b/deps/v8/src/builtins/accessors.cc
@@ -17,7 +17,6 @@
#include "src/objects/contexts.h"
#include "src/objects/field-index-inl.h"
#include "src/objects/js-array-inl.h"
-#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/property-details.h"
#include "src/objects/prototype.h"
diff --git a/deps/v8/src/builtins/accessors.h b/deps/v8/src/builtins/accessors.h
index 0148b8e3d1..27ff276821 100644
--- a/deps/v8/src/builtins/accessors.h
+++ b/deps/v8/src/builtins/accessors.h
@@ -5,7 +5,7 @@
#ifndef V8_BUILTINS_ACCESSORS_H_
#define V8_BUILTINS_ACCESSORS_H_
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/base/bit-field.h"
#include "src/common/globals.h"
#include "src/objects/property-details.h"
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index f45c927e67..1ef63e1096 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -76,6 +76,36 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
+enum class ArgumentsElementType {
+ kRaw, // Push arguments as they are.
+ kHandle // Dereference arguments before pushing.
+};
+
+void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
+ Register scratch,
+ ArgumentsElementType element_type) {
+ DCHECK(!AreAliased(array, argc, scratch));
+ UseScratchRegisterScope temps(masm);
+ Register counter = scratch;
+ Register value = temps.Acquire();
+ Label loop, entry;
+ if (kJSArgcIncludesReceiver) {
+ __ sub(counter, argc, Operand(kJSArgcReceiverSlots));
+ } else {
+ __ mov(counter, argc);
+ }
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(value, MemOperand(array, counter, LSL, kSystemPointerSizeLog2));
+ if (element_type == ArgumentsElementType::kHandle) {
+ __ ldr(value, MemOperand(value));
+ }
+ __ push(value);
+ __ bind(&entry);
+ __ sub(counter, counter, Operand(1), SetCC);
+ __ b(ge, &loop);
+}
+
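Generate_PushArguments above pushes arguments from the highest index down to index 0, so the first argument ends up closest to the top of the stack, and handle slots are dereferenced before being pushed. Below is a standalone C++ model of that loop; it is not part of the patch, and the stack is modeled as a plain vector.

// Toy model of the argument-pushing loop; not V8 code.
#include <cstdint>
#include <iostream>
#include <vector>

enum class ArgumentsElementType {
  kRaw,    // push the slot contents as-is
  kHandle  // the slot holds a pointer; dereference it before pushing
};

void PushArguments(const uintptr_t* array, int argc,
                   ArgumentsElementType element_type,
                   std::vector<uintptr_t>* stack) {
  // Count down so that array[0] (the first argument) ends up on top.
  for (int counter = argc - 1; counter >= 0; --counter) {
    uintptr_t value = array[counter];
    if (element_type == ArgumentsElementType::kHandle) {
      value = *reinterpret_cast<const uintptr_t*>(value);
    }
    stack->push_back(value);
  }
}

int main() {
  uintptr_t raw_args[] = {10, 20, 30};
  std::vector<uintptr_t> stack;
  PushArguments(raw_args, 3, ArgumentsElementType::kRaw, &stack);
  for (uintptr_t v : stack) std::cout << v << " ";  // 30 20 10
  std::cout << "\n";

  uintptr_t a = 1, b = 2;
  uintptr_t handles[] = {reinterpret_cast<uintptr_t>(&a),
                         reinterpret_cast<uintptr_t>(&b)};
  stack.clear();
  PushArguments(handles, 2, ArgumentsElementType::kHandle, &stack);
  for (uintptr_t v : stack) std::cout << v << " ";  // 2 1
  std::cout << "\n";
}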
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -106,12 +136,14 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// correct position (including any undefined), instead of delaying this to
// InvokeFunction.
- // Set up pointer to last argument (skip receiver).
+ // Set up pointer to first argument (skip receiver).
__ add(
r4, fp,
Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
// Copy arguments and receiver to the expression stack.
- __ PushArray(r4, r0, r5);
+ // r4: Pointer to start of arguments.
+ // r0: Number of arguments.
+ Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kRaw);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
@@ -130,7 +162,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(scratch, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ Jump(lr);
__ bind(&stack_overflow);
@@ -230,7 +264,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// InvokeFunction.
// Copy arguments to the expression stack.
- __ PushArray(r4, r0, r5);
+  // r4: Pointer to start of arguments.
+ // r0: Number of arguments.
+ Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kRaw);
// Push implicit receiver.
__ Push(r6);
@@ -276,7 +312,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(r1, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ Jump(lr);
__ bind(&check_receiver);
@@ -308,14 +346,32 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
+ __ Assert(eq, AbortReason::kExpectedBaselineData);
+}
+
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
Label* is_baseline) {
ASM_CODE_COMMENT(masm);
Label done;
- __ CompareObjectType(sfi_data, scratch1, scratch1, BASELINE_DATA_TYPE);
- __ b(eq, is_baseline);
+ __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ b(ne, &not_baseline);
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ b(eq, is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ b(eq, is_baseline);
+ }
__ cmp(scratch1, Operand(INTERPRETER_DATA_TYPE));
__ b(ne, &done);
__ ldr(sfi_data,
@@ -383,6 +439,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldrh(r3,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ sub(r3, r3, Operand(kJSArgcReceiverSlots));
+ }
__ ldr(r2,
FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -705,7 +764,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments + receiver.
// Clobbers r5.
Label enough_stack_space, stack_overflow;
- __ add(r6, r0, Operand(1)); // Add one for receiver.
+ if (kJSArgcIncludesReceiver) {
+ __ mov(r6, r0);
+ } else {
+ __ add(r6, r0, Operand(1)); // Add one for receiver.
+ }
__ StackOverflowCheck(r6, r5, &stack_overflow);
__ b(&enough_stack_space);
__ bind(&stack_overflow);
@@ -715,24 +778,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&enough_stack_space);
- // Copy arguments to the stack in a loop.
+ // Copy arguments to the stack.
// r1: new.target
// r2: function
// r3: receiver
// r0: argc
// r4: argv, i.e. points to first arg
- Label loop, entry;
- __ add(r6, r4, Operand(r0, LSL, kSystemPointerSizeLog2));
- // r6 points past last arg.
- __ b(&entry);
- __ bind(&loop);
- __ ldr(r5, MemOperand(r6, -kSystemPointerSize,
- PreIndex)); // read next parameter
- __ ldr(r5, MemOperand(r5)); // dereference handle
- __ push(r5); // push parameter
- __ bind(&entry);
- __ cmp(r4, r6);
- __ b(ne, &loop);
+ Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kHandle);
// Push the receiver.
__ Push(r3);
@@ -815,7 +867,9 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ ldr(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ lsl(actual_params_size, actual_params_size, Operand(kPointerSizeLog2));
- __ add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+ if (!kJSArgcIncludesReceiver) {
+ __ add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+ }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -1196,7 +1250,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// stack left to right.
//
// The live registers are:
-// o r0: actual argument count (not including the receiver)
+// o r0: actual argument count
// o r1: the JS function object being called.
// o r3: the incoming new target or generator object
// o cp: our context
@@ -1414,8 +1468,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ ldr(r2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ mov(r2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, r2, closure);
__ JumpCodeObject(r2);
@@ -1451,7 +1504,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r2 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
@@ -1464,15 +1517,18 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ sub(r0, r0, Operand(1));
}
- __ add(r3, r0, Operand(1)); // Add one for receiver.
-
- __ StackOverflowCheck(r3, r4, &stack_overflow);
-
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // Don't copy receiver. Argument count is correct.
+ const bool skip_receiver =
+ receiver_mode == ConvertReceiverMode::kNullOrUndefined;
+ if (kJSArgcIncludesReceiver && skip_receiver) {
+ __ sub(r3, r0, Operand(kJSArgcReceiverSlots));
+ } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
+ __ add(r3, r0, Operand(1));
+ } else {
__ mov(r3, r0);
}
+ __ StackOverflowCheck(r3, r4, &stack_overflow);
+
// Push the arguments. r2 and r4 will be modified.
GenerateInterpreterPushArgs(masm, r3, r2, r4);
@@ -1510,7 +1566,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
- // -- r0 : argument count (not including receiver)
+ // -- r0 : argument count
// -- r3 : new target
// -- r1 : constructor to call
// -- r2 : allocation site feedback if available, undefined otherwise.
@@ -1518,17 +1574,20 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// -----------------------------------
Label stack_overflow;
- __ add(r5, r0, Operand(1)); // Add one for receiver.
-
- __ StackOverflowCheck(r5, r6, &stack_overflow);
+ __ StackOverflowCheck(r0, r6, &stack_overflow);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ sub(r0, r0, Operand(1));
}
+ Register argc_without_receiver = r0;
+ if (kJSArgcIncludesReceiver) {
+ argc_without_receiver = r6;
+ __ sub(argc_without_receiver, r0, Operand(kJSArgcReceiverSlots));
+ }
// Push the arguments. r4 and r5 will be modified.
- GenerateInterpreterPushArgs(masm, r0, r4, r5);
+ GenerateInterpreterPushArgs(masm, argc_without_receiver, r4, r5);
// Push a slot for the receiver to be constructed.
__ mov(r5, Operand::Zero());
@@ -1729,10 +1788,13 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r0 contains the arguments count, the return value
// from LAZY is always the last argument.
- __ add(r0, r0, Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ constexpr int return_value_offset =
+ BuiltinContinuationFrameConstants::kFixedSlotCount -
+ kJSArgcReceiverSlots;
+ __ add(r0, r0, Operand(return_value_offset));
__ str(scratch, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Recover arguments count.
- __ sub(r0, r0, Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ __ sub(r0, r0, Operand(return_value_offset));
}
__ ldr(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1815,7 +1877,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
+ __ ldr(r1,
+ FieldMemOperand(r0, Code::kDeoptimizationDataOrInterpreterDataOffset));
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
@@ -1857,12 +1920,14 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadRoot(r5, RootIndex::kUndefinedValue);
__ mov(r2, r5);
__ ldr(r1, MemOperand(sp, 0)); // receiver
- __ cmp(r0, Operand(1));
+ __ cmp(r0, Operand(JSParameterCount(1)));
__ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg
- __ cmp(r0, Operand(2), ge);
+ __ cmp(r0, Operand(JSParameterCount(2)), ge);
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray
- __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r0, r5, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1888,7 +1953,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments to the receiver.
__ bind(&no_arguments);
{
- __ mov(r0, Operand(0));
+ __ mov(r0, Operand(JSParameterCount(0)));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
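
The predicated loads in Generate_FunctionPrototypeApply above reduce to a simple length check once the receiver slot is counted. A standalone sketch assuming the stack layout receiver / thisArg / argArray; ApplyArgs and SelectApplyArgs are illustrative names only.

    #include <cassert>
    #include <cstddef>
    #include <optional>
    #include <vector>

    struct ApplyArgs {
      int receiver;
      std::optional<int> this_arg;   // defaults to undefined in the builtin
      std::optional<int> arg_array;  // defaults to undefined in the builtin
    };

    // stack[0] is the receiver; stack[1], stack[2], ... are the explicit arguments.
    ApplyArgs SelectApplyArgs(const std::vector<int>& stack) {
      ApplyArgs out{stack[0], std::nullopt, std::nullopt};
      std::size_t explicit_args = stack.size() - 1;      // argc without the receiver
      if (explicit_args >= 1) out.this_arg = stack[1];   // cmp argc, JSParameterCount(1)
      if (explicit_args >= 2) out.arg_array = stack[2];  // cmp argc, JSParameterCount(2)
      return out;
    }

    int main() {
      ApplyArgs a = SelectApplyArgs({/*fn*/ 1, /*thisArg*/ 2});
      assert(a.this_arg && !a.arg_array);
      return 0;
    }
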
@@ -1902,7 +1967,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// r0: actual number of arguments
{
Label done;
- __ cmp(r0, Operand::Zero());
+ __ cmp(r0, Operand(JSParameterCount(0)));
__ b(ne, &done);
__ PushRoot(RootIndex::kUndefinedValue);
__ add(r0, r0, Operand(1));
@@ -1932,14 +1997,16 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(r1, RootIndex::kUndefinedValue);
__ mov(r5, r1);
__ mov(r2, r1);
- __ cmp(r0, Operand(1));
+ __ cmp(r0, Operand(JSParameterCount(1)));
__ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
- __ cmp(r0, Operand(2), ge);
+ __ cmp(r0, Operand(JSParameterCount(2)), ge);
__ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument
- __ cmp(r0, Operand(3), ge);
+ __ cmp(r0, Operand(JSParameterCount(3)), ge);
__ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList
- __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r0, r5, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1974,15 +2041,17 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadRoot(r1, RootIndex::kUndefinedValue);
__ mov(r2, r1);
__ mov(r4, r1);
- __ cmp(r0, Operand(1));
+ __ cmp(r0, Operand(JSParameterCount(1)));
__ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
__ mov(r3, r1); // new.target defaults to target
- __ cmp(r0, Operand(2), ge);
+ __ cmp(r0, Operand(JSParameterCount(2)), ge);
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList
- __ cmp(r0, Operand(3), ge);
+ __ cmp(r0, Operand(JSParameterCount(3)), ge);
__ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target
- __ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r0, r4, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2005,13 +2074,55 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+namespace {
+
+// Allocate new stack space for |count| arguments and shift all existing
+// arguments already on the stack. |pointer_to_new_space_out| points to the
+// first free slot on the stack to copy additional arguments to, and
+// |argc_in_out| is updated to include |count|.
+void Generate_AllocateSpaceAndShiftExistingArguments(
+ MacroAssembler* masm, Register count, Register argc_in_out,
+ Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
+ DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
+ scratch2));
+ UseScratchRegisterScope temps(masm);
+ Register old_sp = scratch1;
+ Register new_space = scratch2;
+ __ mov(old_sp, sp);
+ __ lsl(new_space, count, Operand(kSystemPointerSizeLog2));
+ __ AllocateStackSpace(new_space);
+
+ Register end = scratch2;
+ Register value = temps.Acquire();
+ Register dest = pointer_to_new_space_out;
+ __ mov(dest, sp);
+ __ add(end, old_sp, Operand(argc_in_out, LSL, kSystemPointerSizeLog2));
+ Label loop, done;
+ __ bind(&loop);
+ __ cmp(old_sp, end);
+ if (kJSArgcIncludesReceiver) {
+ __ b(ge, &done);
+ } else {
+ __ b(gt, &done);
+ }
+ __ ldr(value, MemOperand(old_sp, kSystemPointerSize, PostIndex));
+ __ str(value, MemOperand(dest, kSystemPointerSize, PostIndex));
+ __ b(&loop);
+ __ bind(&done);
+
+ // Update total number of arguments.
+ __ add(argc_in_out, argc_in_out, count);
+}
+
+} // namespace
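
The helper above replaces the open-coded copy loops that the later hunks delete. Its net effect on the argument area can be modelled with a plain vector, which is the sketch below; this is the editor's standalone model of the post-condition, not the instruction-by-instruction copy, and the names are illustrative.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // stack[0] is the slot at sp; higher indices are higher addresses.
    // Returns the index of the first free slot for the |count| new arguments.
    std::size_t AllocateSpaceAndShiftExistingArgumentsModel(
        std::vector<int>& stack, std::size_t count, std::size_t& argc_in_out) {
      // The existing |argc_in_out| slots (receiver included when it is counted)
      // stay at the sp end; |count| free slots open up directly above them.
      stack.insert(stack.begin() + argc_in_out, count, 0);
      std::size_t first_free_slot = argc_in_out;  // what pointer_to_new_space_out addresses
      argc_in_out += count;                       // "Update total number of arguments."
      return first_free_slot;
    }

    int main() {
      // Receiver plus two arguments already on the stack, caller frame above them.
      std::vector<int> stack = {/*receiver*/ 10, /*arg0*/ 11, /*arg1*/ 12,
                                /*caller frame*/ 99};
      std::size_t argc = 3;
      std::size_t dest =
          AllocateSpaceAndShiftExistingArgumentsModel(stack, /*count=*/2, argc);
      stack[dest] = 20;      // first extra argument
      stack[dest + 1] = 21;  // second extra argument
      assert(argc == 5 && stack[4] == 21 && stack[5] == 99);
      return 0;
    }
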
+
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- r1 : target
- // -- r0 : number of parameters on the stack (not including the receiver)
+ // -- r0 : number of parameters on the stack
// -- r2 : arguments list (a FixedArray)
// -- r4 : len (number of elements to push from args)
// -- r3 : new.target (for [[Construct]])
@@ -2042,23 +2153,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register num = r5, src = r6, dest = r9; // r7 and r8 are context and root.
- __ mov(src, sp);
- // Update stack pointer.
- __ lsl(scratch, r4, Operand(kSystemPointerSizeLog2));
- __ AllocateStackSpace(scratch);
- __ mov(dest, sp);
- __ mov(num, r0);
- __ b(&check);
- __ bind(&copy);
- __ ldr(scratch, MemOperand(src, kSystemPointerSize, PostIndex));
- __ str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
- __ sub(num, num, Operand(1), SetCC);
- __ bind(&check);
- __ b(ge, &copy);
- }
+ // r4: Number of arguments to make room for.
+ // r0: Number of arguments already on the stack.
+ // r9: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, r4, r0, r9, r5, r6);
// Copy arguments onto the stack (thisArgument is already on the stack).
{
@@ -2077,7 +2175,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ add(r6, r6, Operand(1));
__ b(&loop);
__ bind(&done);
- __ add(r0, r0, r6);
}
// Tail-call to the actual Call or Construct builtin.
@@ -2092,7 +2189,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r3 : the new.target (for [[Construct]] calls)
// -- r1 : the target to call (can be any Object)
// -- r2 : start index (to support rest parameters)
@@ -2120,12 +2217,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ ldr(r5, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ sub(r5, r5, Operand(kJSArgcReceiverSlots));
+ }
__ sub(r5, r5, r2, SetCC);
__ b(le, &stack_done);
{
// ----------- S t a t e -------------
- // -- r0 : the number of arguments already in the stack (not including the
- // receiver)
+ // -- r0 : the number of arguments already in the stack
// -- r1 : the target to call (can be any Object)
// -- r2 : start index (to support rest parameters)
// -- r3 : the new.target (for [[Construct]] calls)
@@ -2145,30 +2244,17 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register num = r8, src = r9,
- dest = r2; // r7 and r10 are context and root.
- __ mov(src, sp);
- // Update stack pointer.
- __ lsl(scratch, r5, Operand(kSystemPointerSizeLog2));
- __ AllocateStackSpace(scratch);
- __ mov(dest, sp);
- __ mov(num, r0);
- __ b(&check);
- __ bind(&copy);
- __ ldr(scratch, MemOperand(src, kSystemPointerSize, PostIndex));
- __ str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
- __ sub(num, num, Operand(1), SetCC);
- __ bind(&check);
- __ b(ge, &copy);
- }
+ // r5: Number of arguments to make room for.
+ // r0: Number of arguments already on the stack.
+ // r2: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, r5, r0, r2, scratch,
+ r8);
+
// Copy arguments from the caller frame.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
Label loop;
- __ add(r0, r0, r5);
__ bind(&loop);
{
__ sub(r5, r5, Operand(1), SetCC);
@@ -2191,7 +2277,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSFunction)
// -----------------------------------
__ AssertFunction(r1);
@@ -2216,7 +2302,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ b(ne, &done_convert);
{
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSFunction)
// -- r2 : the shared function info.
// -- cp : the function context.
@@ -2268,7 +2354,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bind(&done_convert);
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSFunction)
// -- r2 : the shared function info.
// -- cp : the function context.
@@ -2292,7 +2378,7 @@ namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
ASM_CODE_COMMENT(masm);
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : target (checked to be a JSBoundFunction)
// -- r3 : new.target (only in case of [[Construct]])
// -----------------------------------
@@ -2306,7 +2392,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ b(eq, &no_bound_arguments);
{
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : target (checked to be a JSBoundFunction)
// -- r2 : the [[BoundArguments]] (implemented as FixedArray)
// -- r3 : new.target (only in case of [[Construct]])
@@ -2370,7 +2456,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r1);
@@ -2391,7 +2477,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the target to call (can be any Object).
// -----------------------------------
@@ -2438,7 +2524,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the constructor to call (checked to be a JSFunction)
// -- r3 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2468,7 +2554,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSBoundFunction)
// -- r3 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2491,7 +2577,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the constructor to call (can be any Object)
// -- r3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
@@ -2777,12 +2863,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ cmp(cp, Operand(0));
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
@@ -3504,7 +3584,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CompareObjectType(code_obj, r3, r3, BASELINE_DATA_TYPE);
+ __ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
__ b(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -3517,13 +3597,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
- __ CompareObjectType(code_obj, r3, r3, BASELINE_DATA_TYPE);
+ __ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
- // Load baseline code from baseline data.
- __ ldr(code_obj,
- FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, r3);
+ }
// Load the feedback vector.
Register feedback_vector = r2;
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index b1f9a63e3c..ac34e17354 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -112,10 +112,12 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ SmiTag(x11, argc);
__ Push(x11, padreg);
- // Add a slot for the receiver, and round up to maintain alignment.
+ // Add a slot for the receiver (if not already included), and round up to
+ // maintain alignment.
Register slot_count = x2;
Register slot_count_without_rounding = x12;
- __ Add(slot_count_without_rounding, argc, 2);
+ constexpr int additional_slots = kJSArgcIncludesReceiver ? 1 : 2;
+ __ Add(slot_count_without_rounding, argc, additional_slots);
__ Bic(slot_count, slot_count_without_rounding, 1);
__ Claim(slot_count);
@@ -128,7 +130,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Store padding, if needed.
__ Tbnz(slot_count_without_rounding, 0, &already_aligned);
- __ Str(padreg, MemOperand(x2, 1 * kSystemPointerSize));
+ __ Str(padreg,
+ MemOperand(x2, kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
__ Bind(&already_aligned);
// TODO(victorgomes): When the arguments adaptor is completely removed, we
@@ -148,7 +151,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Add(src, fp,
StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize); // Skip receiver.
- __ Mov(count, argc);
+ if (kJSArgcIncludesReceiver) {
+ __ Sub(count, argc, kJSArgcReceiverSlots);
+ } else {
+ __ Mov(count, argc);
+ }
__ CopyDoubleWords(dst, src, count);
}
@@ -190,7 +197,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(x1, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ Ret();
__ Bind(&stack_overflow);
@@ -311,6 +320,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Round the number of arguments down to the next even number, and claim
// slots for the arguments. If the number of arguments was odd, the last
// argument will overwrite one of the receivers pushed above.
+ Register argc_without_receiver = x12;
+ if (kJSArgcIncludesReceiver) {
+ argc_without_receiver = x11;
+ __ Sub(argc_without_receiver, x12, kJSArgcReceiverSlots);
+ }
__ Bic(x10, x12, 1);
// Check if we have enough stack space to push all arguments.
@@ -328,7 +342,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Register count = x2;
Register dst = x10;
Register src = x11;
- __ Mov(count, x12);
+ __ Mov(count, argc_without_receiver);
__ Poke(x0, 0); // Add the receiver.
__ SlotAddress(dst, 1); // Skip receiver.
__ Add(src, fp,
@@ -374,7 +388,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Leave construct frame.
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(x1, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ Ret();
// Otherwise we do a smi check and fall through to check if the return value
@@ -414,6 +430,21 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ Unreachable();
}
+static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
+ Register code, Register scratch) {
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ Ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
+ __ Assert(eq, AbortReason::kExpectedBaselineData);
+}
+
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
+}
+
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
// the more general dispatch.
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
@@ -422,8 +453,21 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label* is_baseline) {
ASM_CODE_COMMENT(masm);
Label done;
- __ CompareObjectType(sfi_data, scratch1, scratch1, BASELINE_DATA_TYPE);
- __ B(eq, is_baseline);
+ __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ B(ne, &not_baseline);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ __ LoadCodeDataContainerCodeNonBuiltin(scratch1, sfi_data);
+ AssertCodeIsBaselineAllowClobber(masm, scratch1, scratch1);
+ } else {
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ }
+ __ B(eq, is_baseline);
+ __ Bind(&not_baseline);
+ } else {
+ __ B(eq, is_baseline);
+ }
__ Cmp(scratch1, INTERPRETER_DATA_TYPE);
__ B(ne, &done);
__ LoadTaggedPointerField(
@@ -485,12 +529,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ldrh(w10, FieldMemOperand(
x10, SharedFunctionInfo::kFormalParameterCountOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ Sub(x10, x10, kJSArgcReceiverSlots);
+ }
// Claim slots for arguments and receiver (rounded up to a multiple of two).
__ Add(x11, x10, 2);
__ Bic(x11, x11, 1);
__ Claim(x11);
- // Store padding (which might be replaced by the receiver).
+ // Store padding (which might be replaced by the last argument).
__ Sub(x11, x11, 1);
__ Poke(padreg, Operand(x11, LSL, kSystemPointerSizeLog2));
@@ -855,9 +902,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
masm->isolate()));
__ Ldr(cp, MemOperand(scratch));
- // Claim enough space for the arguments, the receiver and the function,
- // including an optional slot of padding.
- __ Add(slots_to_claim, argc, 3);
+ // Claim enough space for the arguments, the function, and the receiver (if
+ // it is not already included in argc), including an optional slot of
+ // padding.
+ constexpr int additional_slots = kJSArgcIncludesReceiver ? 2 : 3;
+ __ Add(slots_to_claim, argc, additional_slots);
__ Bic(slots_to_claim, slots_to_claim, 1);
// Check if we have enough stack space to push all arguments.
@@ -880,7 +929,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Poke(receiver, 0);
// Store function on the stack.
__ SlotAddress(scratch, argc);
- __ Str(function, MemOperand(scratch, kSystemPointerSize));
+ __ Str(
+ function,
+ MemOperand(scratch, kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
// Copy arguments to the stack in a loop, in reverse order.
// x4: argc.
@@ -888,7 +939,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
Label loop, done;
// Skip the argument set up if we have no arguments.
- __ Cbz(argc, &done);
+ if (kJSArgcIncludesReceiver) {
+ __ Cmp(argc, JSParameterCount(0));
+ __ B(eq, &done);
+ } else {
+ __ Cbz(argc, &done);
+ }
// scratch has been set to point to the location of the function, which
// marks the end of the argument copy.
@@ -902,7 +958,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Str(x11, MemOperand(x0, kSystemPointerSize, PostIndex));
// Loop if we've not reached the end of copy marker.
__ Cmp(x0, scratch);
- __ B(le, &loop);
+ if (kJSArgcIncludesReceiver) {
+ __ B(lt, &loop);
+ } else {
+ __ B(le, &loop);
+ }
__ Bind(&done);
@@ -992,7 +1052,9 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ Ldr(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ lsl(actual_params_size, actual_params_size, kSystemPointerSizeLog2);
- __ Add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+ if (!kJSArgcIncludesReceiver) {
+ __ Add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+ }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
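
A small sketch of the byte count computed above, with assumed constant values; it shows why the extra kSystemPointerSize is only needed when the raw count does not already include the receiver.

    constexpr int kSystemPointerSize = 8;           // arm64
    constexpr bool kJSArgcIncludesReceiver = true;  // assumption for this sketch

    constexpr int ActualParamsSizeInBytes(int raw_argc) {
      int size = raw_argc * kSystemPointerSize;  // lsl by kSystemPointerSizeLog2
      if (!kJSArgcIncludesReceiver) {
        size += kSystemPointerSize;  // old convention: add the receiver slot
      }
      return size;
    }

    static_assert(ActualParamsSizeInBytes(3) == 3 * kSystemPointerSize,
                  "receiver + 2 arguments occupy three slots to drop");

    int main() { return 0; }
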
@@ -1378,7 +1440,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// stack left to right.
//
// The live registers are:
-// - x0: actual argument count (not including the receiver)
+// - x0: actual argument count
// - x1: the JS function object being called.
// - x3: the incoming new target or generator object
// - cp: our context.
@@ -1614,9 +1676,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ LoadTaggedPointerField(
- x2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(x2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, x2, closure);
__ JumpCodeTObject(x2);
@@ -1643,7 +1703,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
Register last_arg_addr = x10;
Register stack_addr = x11;
Register slots_to_claim = x12;
- Register slots_to_copy = x13; // May include receiver, unlike num_args.
+ Register slots_to_copy = x13;
DCHECK(!AreAliased(num_args, first_arg_index, last_arg_addr, stack_addr,
slots_to_claim, slots_to_copy));
@@ -1651,15 +1711,17 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
DCHECK(!AreAliased(spread_arg_out, last_arg_addr, stack_addr, slots_to_claim,
slots_to_copy));
- // Add one slot for the receiver.
- __ Add(slots_to_claim, num_args, 1);
-
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Exclude final spread from slots to claim and the number of arguments.
- __ Sub(slots_to_claim, slots_to_claim, 1);
__ Sub(num_args, num_args, 1);
}
+ // Add receiver (if not already included in argc) and round up to an even
+ // number of slots.
+ constexpr int additional_slots = kJSArgcIncludesReceiver ? 1 : 2;
+ __ Add(slots_to_claim, num_args, additional_slots);
+ __ Bic(slots_to_claim, slots_to_claim, 1);
+
// Add a stack check before pushing arguments.
Label stack_overflow, done;
__ StackOverflowCheck(slots_to_claim, &stack_overflow);
@@ -1669,9 +1731,6 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
__ Unreachable();
__ Bind(&done);
- // Round up to an even number of slots and claim them.
- __ Add(slots_to_claim, slots_to_claim, 1);
- __ Bic(slots_to_claim, slots_to_claim, 1);
__ Claim(slots_to_claim);
{
@@ -1682,15 +1741,16 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
__ Poke(padreg, Operand(scratch, LSL, kSystemPointerSizeLog2));
}
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ Mov(slots_to_copy, num_args);
- __ SlotAddress(stack_addr, 1);
- } else {
- // If we're not given an explicit receiver to store, we'll need to copy it
- // together with the rest of the arguments.
+ const bool skip_receiver =
+ receiver_mode == ConvertReceiverMode::kNullOrUndefined;
+ if (kJSArgcIncludesReceiver && skip_receiver) {
+ __ Sub(slots_to_copy, num_args, kJSArgcReceiverSlots);
+ } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
__ Add(slots_to_copy, num_args, 1);
- __ SlotAddress(stack_addr, 0);
+ } else {
+ __ Mov(slots_to_copy, num_args);
}
+ __ SlotAddress(stack_addr, skip_receiver ? 1 : 0);
__ Sub(last_arg_addr, first_arg_index,
Operand(slots_to_copy, LSL, kSystemPointerSizeLog2));
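
The Add/Bic pair above is a round-up-to-even: arm64 keeps sp 16-byte aligned, so the claimed slot count must be even. A standalone sketch with kJSArgcIncludesReceiver assumed true; SlotsToClaim is an illustrative name.

    #include <cassert>

    constexpr bool kJSArgcIncludesReceiver = true;  // assumption for this sketch

    constexpr int SlotsToClaim(int num_args) {
      // +1 rounds up to even; +2 also accounts for the uncounted receiver slot.
      int additional_slots = kJSArgcIncludesReceiver ? 1 : 2;
      return (num_args + additional_slots) & ~1;  // Add, then Bic(..., 1)
    }

    int main() {
      // receiver + 2 explicit args: three slots needed, four claimed (one padding).
      assert(SlotsToClaim(3) == 4);
      // receiver + 3 explicit args: already even, no padding slot claimed.
      assert(SlotsToClaim(4) == 4);
      return 0;
    }
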
@@ -1718,7 +1778,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x2 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
@@ -1749,7 +1809,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
- // -- x0 : argument count (not including receiver)
+ // -- x0 : argument count
// -- x3 : new target
// -- x1 : constructor to call
// -- x2 : allocation site feedback if available, undefined otherwise
@@ -1975,16 +2035,16 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
if (java_script_builtin && with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point. r0 contains the arguments count, the return value
+ // the LAZY deopt point. x0 contains the arguments count; the return value
// from LAZY is always the last argument.
- __ add(x0, x0,
- BuiltinContinuationFrameConstants::kCallerSPOffset /
- kSystemPointerSize);
+ constexpr int return_offset =
+ BuiltinContinuationFrameConstants::kCallerSPOffset /
+ kSystemPointerSize -
+ kJSArgcReceiverSlots;
+ __ add(x0, x0, return_offset);
__ Str(scratch, MemOperand(fp, x0, LSL, kSystemPointerSizeLog2));
// Recover argument count.
- __ sub(x0, x0,
- BuiltinContinuationFrameConstants::kCallerSPOffset /
- kSystemPointerSize);
+ __ sub(x0, x0, return_offset);
}
// Load builtin index (stored as a Smi) and use it to get the builtin start
@@ -2078,7 +2138,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
- x1, FieldMemOperand(x0, Code::kDeoptimizationDataOffset));
+ x1,
+ FieldMemOperand(x0, Code::kDeoptimizationDataOrInterpreterDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2133,14 +2194,16 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Mov(this_arg, undefined_value);
__ Mov(arg_array, undefined_value);
__ Peek(receiver, 0);
- __ Cmp(argc, Immediate(1));
+ __ Cmp(argc, Immediate(JSParameterCount(1)));
__ B(lt, &done);
__ Peek(this_arg, kSystemPointerSize);
__ B(eq, &done);
__ Peek(arg_array, 2 * kSystemPointerSize);
__ bind(&done);
}
- __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(argc, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ PushArgument(this_arg);
// ----------- S t a t e -------------
@@ -2167,7 +2230,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments to the receiver.
__ Bind(&no_arguments);
{
- __ Mov(x0, 0);
+ __ Mov(x0, JSParameterCount(0));
DCHECK_EQ(receiver, x1);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
@@ -2187,7 +2250,12 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
{
Label non_zero;
Register scratch = x10;
- __ Cbnz(argc, &non_zero);
+ if (kJSArgcIncludesReceiver) {
+ __ Cmp(argc, JSParameterCount(0));
+ __ B(gt, &non_zero);
+ } else {
+ __ Cbnz(argc, &non_zero);
+ }
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
// Overwrite receiver with undefined, which will be the new receiver.
// We do not need to overwrite the padding slot above it with anything.
@@ -2205,8 +2273,15 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Register copy_from = x10;
Register copy_to = x11;
Register count = x12;
- __ Mov(count, argc); // CopyDoubleWords changes the count argument.
- __ Tbz(argc, 0, &even);
+ UseScratchRegisterScope temps(masm);
+ Register argc_without_receiver = argc;
+ if (kJSArgcIncludesReceiver) {
+ argc_without_receiver = temps.AcquireX();
+ __ Sub(argc_without_receiver, argc, kJSArgcReceiverSlots);
+ }
+ // CopyDoubleWords changes the count argument.
+ __ Mov(count, argc_without_receiver);
+ __ Tbz(argc_without_receiver, 0, &even);
// Shift arguments one slot down on the stack (overwriting the original
// receiver).
@@ -2214,7 +2289,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Sub(copy_to, copy_from, kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, count);
// Overwrite the duplicated remaining last argument.
- __ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2));
+ __ Poke(padreg, Operand(argc_without_receiver, LSL, kXRegSizeLog2));
__ B(&arguments_ready);
// Copy arguments one slot higher in memory, overwriting the original
@@ -2261,17 +2336,19 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Mov(target, undefined_value);
__ Mov(this_argument, undefined_value);
__ Mov(arguments_list, undefined_value);
- __ Cmp(argc, Immediate(1));
+ __ Cmp(argc, Immediate(JSParameterCount(1)));
__ B(lt, &done);
__ Peek(target, kSystemPointerSize);
__ B(eq, &done);
__ Peek(this_argument, 2 * kSystemPointerSize);
- __ Cmp(argc, Immediate(3));
+ __ Cmp(argc, Immediate(JSParameterCount(3)));
__ B(lt, &done);
__ Peek(arguments_list, 3 * kSystemPointerSize);
__ bind(&done);
}
- __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(argc, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ PushArgument(this_argument);
// ----------- S t a t e -------------
@@ -2317,19 +2394,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Mov(target, undefined_value);
__ Mov(arguments_list, undefined_value);
__ Mov(new_target, undefined_value);
- __ Cmp(argc, Immediate(1));
+ __ Cmp(argc, Immediate(JSParameterCount(1)));
__ B(lt, &done);
__ Peek(target, kSystemPointerSize);
__ B(eq, &done);
__ Peek(arguments_list, 2 * kSystemPointerSize);
__ Mov(new_target, target); // new.target defaults to target
- __ Cmp(argc, Immediate(3));
+ __ Cmp(argc, Immediate(JSParameterCount(3)));
__ B(lt, &done);
__ Peek(new_target, 3 * kSystemPointerSize);
__ bind(&done);
}
- __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(argc, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// Push receiver (undefined).
__ PushArgument(undefined_value);
@@ -2365,19 +2444,25 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
Register slots_to_copy = x10;
Register slots_to_claim = x12;
- __ Add(slots_to_copy, argc, 1); // Copy with receiver.
+ if (kJSArgcIncludesReceiver) {
+ __ Mov(slots_to_copy, argc);
+ } else {
+ __ Add(slots_to_copy, argc, 1); // Copy with receiver.
+ }
__ Mov(slots_to_claim, len);
__ Tbz(slots_to_claim, 0, &even);
- // Claim space we need. If argc is even, slots_to_claim = len + 1, as we need
- // one extra padding slot. If argc is odd, we know that the original arguments
- // will have a padding slot we can reuse (since len is odd), so
- // slots_to_claim = len - 1.
+ // Claim space we need. If argc (without receiver) is even, slots_to_claim =
+ // len + 1, as we need one extra padding slot. If argc (without receiver) is
+ // odd, we know that the original arguments will have a padding slot we can
+ // reuse (since len is odd), so slots_to_claim = len - 1.
{
Register scratch = x11;
__ Add(slots_to_claim, len, 1);
__ And(scratch, argc, 1);
- __ Eor(scratch, scratch, 1);
+ if (!kJSArgcIncludesReceiver) {
+ __ Eor(scratch, scratch, 1);
+ }
__ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
}
@@ -2404,7 +2489,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- x1 : target
- // -- x0 : number of parameters on the stack (not including the receiver)
+ // -- x0 : number of parameters on the stack
// -- x2 : arguments list (a FixedArray)
// -- x4 : len (number of elements to push from args)
// -- x3 : new.target (for [[Construct]])
@@ -2455,8 +2540,12 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// scenes and we want to avoid that in a loop.
// TODO(all): Consider using Ldp and Stp.
Register dst = x16;
- __ Add(dst, argc, Immediate(1)); // Consider the receiver as well.
- __ SlotAddress(dst, dst);
+ if (kJSArgcIncludesReceiver) {
+ __ SlotAddress(dst, argc);
+ } else {
+ __ Add(dst, argc, Immediate(1)); // Consider the receiver as well.
+ __ SlotAddress(dst, dst);
+ }
__ Add(argc, argc, len); // Update new argc.
__ Bind(&loop);
__ Sub(len, len, 1);
@@ -2479,7 +2568,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x3 : the new.target (for [[Construct]] calls)
// -- x1 : the target to call (can be any Object)
// -- x2 : start index (to support rest parameters)
@@ -2510,6 +2599,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Register len = x6;
Label stack_done, stack_overflow;
__ Ldr(len, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ Subs(len, len, kJSArgcReceiverSlots);
+ }
__ Subs(len, len, start_index);
__ B(le, &stack_done);
// Check for stack overflow.
@@ -2527,8 +2619,12 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ lsl(start_index, start_index, kSystemPointerSizeLog2);
__ Add(args_fp, args_fp, start_index);
// Point to the position to copy to.
- __ Add(x10, argc, 1);
- __ SlotAddress(dst, x10);
+ if (kJSArgcIncludesReceiver) {
+ __ SlotAddress(dst, argc);
+ } else {
+ __ Add(x10, argc, 1);
+ __ SlotAddress(dst, x10);
+ }
// Update total number of arguments.
__ Add(argc, argc, len);
__ CopyDoubleWords(dst, args_fp, len);
@@ -2547,7 +2643,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
ASM_LOCATION("Builtins::Generate_CallFunction");
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSFunction)
// -----------------------------------
__ AssertFunction(x1);
@@ -2574,7 +2670,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
&done_convert);
{
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSFunction)
// -- x2 : the shared function info.
// -- cp : the function context.
@@ -2625,7 +2721,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Bind(&done_convert);
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSFunction)
// -- x2 : the shared function info.
// -- cp : the function context.
@@ -2649,7 +2745,7 @@ namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : target (checked to be a JSBoundFunction)
// -- x3 : new.target (only in case of [[Construct]])
// -----------------------------------
@@ -2666,7 +2762,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Cbz(bound_argc, &no_bound_arguments);
{
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : target (checked to be a JSBoundFunction)
// -- x2 : the [[BoundArguments]] (implemented as FixedArray)
// -- x3 : new.target (only in case of [[Construct]])
@@ -2698,6 +2794,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Register scratch = x10;
Register receiver = x14;
+ if (kJSArgcIncludesReceiver) {
+ __ Sub(argc, argc, kJSArgcReceiverSlots);
+ }
__ Add(total_argc, argc, bound_argc);
__ Peek(receiver, 0);
@@ -2766,7 +2865,11 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Cbnz(counter, &loop);
}
// Update argc.
- __ Mov(argc, total_argc);
+ if (kJSArgcIncludesReceiver) {
+ __ Add(argc, total_argc, kJSArgcReceiverSlots);
+ } else {
+ __ Mov(argc, total_argc);
+ }
}
__ Bind(&no_bound_arguments);
}
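
The Sub/Add pair around the bound-arguments copy above just keeps the raw count consistent while the arithmetic is done receiver-less. A standalone sketch under the kJSArgcIncludesReceiver convention; the function name is illustrative.

    constexpr int kJSArgcReceiverSlots = 1;  // raw argc counts the receiver (assumed)

    constexpr int ArgcAfterPushingBoundArgs(int raw_argc, int bound_argc) {
      int without_receiver = raw_argc - kJSArgcReceiverSlots;  // Sub(argc, argc, kJSArgcReceiverSlots)
      int total = without_receiver + bound_argc;               // Add(total_argc, argc, bound_argc)
      return total + kJSArgcReceiverSlots;                     // Add(argc, total_argc, kJSArgcReceiverSlots)
    }

    static_assert(ArgcAfterPushingBoundArgs(/*receiver + 1 arg*/ 2, /*bound*/ 3) == 5,
                  "receiver plus four arguments after splicing in the bound args");

    int main() { return 0; }
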
@@ -2776,7 +2879,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(x1);
@@ -2799,7 +2902,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the target to call (can be any Object).
// -----------------------------------
@@ -2848,7 +2951,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the constructor to call (checked to be a JSFunction)
// -- x3 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2879,7 +2982,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSBoundFunction)
// -- x3 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2908,7 +3011,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the constructor to call (can be any Object)
// -- x3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
@@ -3250,12 +3353,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Bind(&not_js_frame);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
{
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
UseScratchRegisterScope temps(masm);
@@ -4032,7 +4129,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CompareObjectType(code_obj, x3, x3, BASELINE_DATA_TYPE);
+ __ CompareObjectType(code_obj, x3, x3, CODET_TYPE);
__ B(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -4045,16 +4142,16 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
- __ CompareObjectType(code_obj, x3, x3, BASELINE_DATA_TYPE);
+ __ CompareObjectType(code_obj, x3, x3, CODET_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
- // Load baseline code from baseline data.
- __ LoadTaggedPointerField(
- code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
__ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
}
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, x3);
+ }
// Load the feedback vector.
Register feedback_vector = x2;
diff --git a/deps/v8/src/builtins/array-concat.tq b/deps/v8/src/builtins/array-concat.tq
index 5eb66e6ce8..6fad3e6683 100644
--- a/deps/v8/src/builtins/array-concat.tq
+++ b/deps/v8/src/builtins/array-concat.tq
@@ -43,7 +43,7 @@ ArrayPrototypeConcat(
// TODO(victorgomes): Implement slow path ArrayConcat in Torque.
tail ArrayConcat(
context, LoadTargetFromFrame(), Undefined,
- Convert<int32>(arguments.length));
+ Convert<int32>(arguments.actual_count));
}
} // namespace array
diff --git a/deps/v8/src/builtins/array-shift.tq b/deps/v8/src/builtins/array-shift.tq
index ed1087a85a..ea62b1c7a8 100644
--- a/deps/v8/src/builtins/array-shift.tq
+++ b/deps/v8/src/builtins/array-shift.tq
@@ -103,7 +103,7 @@ transitioning javascript builtin ArrayPrototypeShift(
} label Runtime {
tail ArrayShift(
context, LoadTargetFromFrame(), Undefined,
- Convert<int32>(arguments.length));
+ Convert<int32>(arguments.actual_count));
}
}
}
diff --git a/deps/v8/src/builtins/array-unshift.tq b/deps/v8/src/builtins/array-unshift.tq
index 7afeeb0627..69938ccaea 100644
--- a/deps/v8/src/builtins/array-unshift.tq
+++ b/deps/v8/src/builtins/array-unshift.tq
@@ -89,7 +89,7 @@ transitioning javascript builtin ArrayPrototypeUnshift(
tail ArrayUnshift(
context, LoadTargetFromFrame(), Undefined,
- Convert<int32>(arguments.length));
+ Convert<int32>(arguments.actual_count));
} label Slow {
return GenericArrayUnshift(context, receiver, arguments);
}
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 48eb954f83..75c3c194b9 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -45,13 +45,13 @@ void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() {
// See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
TNode<Object> ArrayBuiltinsAssembler::TypedArrayMapProcessor(
TNode<Object> k_value, TNode<UintPtrT> k) {
- // 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
+ // 7c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
TNode<Number> k_number = ChangeUintPtrToTagged(k);
TNode<Object> mapped_value =
Call(context(), callbackfn(), this_arg(), k_value, k_number, o());
Label fast(this), slow(this), done(this), detached(this, Label::kDeferred);
- // 8. d. Perform ? Set(A, Pk, mapped_value, true).
+ // 7d. Perform ? Set(A, Pk, mapped_value, true).
// Since we know that A is a TypedArray, this always ends up in
// #sec-integer-indexed-exotic-objects-set-p-v-receiver and then
// tc39.github.io/ecma262/#sec-integerindexedelementset .
@@ -59,9 +59,9 @@ TNode<Object> ArrayBuiltinsAssembler::TypedArrayMapProcessor(
BIND(&fast);
// #sec-integerindexedelementset
- // 5. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let
+ // 2. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let
// numValue be ? ToBigInt(v).
- // 6. Otherwise, let numValue be ? ToNumber(value).
+ // 3. Otherwise, let numValue be ? ToNumber(value).
TNode<Object> num_value;
if (source_elements_kind_ == BIGINT64_ELEMENTS ||
source_elements_kind_ == BIGUINT64_ELEMENTS) {
@@ -175,24 +175,15 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
size_t i = 0;
for (auto it = labels.begin(); it != labels.end(); ++i, ++it) {
BIND(&*it);
- Label done(this);
source_elements_kind_ = static_cast<ElementsKind>(elements_kinds[i]);
- // TODO(turbofan): Silently cancelling the loop on buffer detachment is a
- // spec violation. Should go to &throw_detached and throw a TypeError
- // instead.
- VisitAllTypedArrayElements(array_buffer, processor, &done, direction,
- typed_array);
- Goto(&done);
- // No exception, return success
- BIND(&done);
+ VisitAllTypedArrayElements(array_buffer, processor, direction, typed_array);
ReturnFromBuiltin(a_.value());
}
}
void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
TNode<JSArrayBuffer> array_buffer, const CallResultProcessor& processor,
- Label* detached, ForEachDirection direction,
- TNode<JSTypedArray> typed_array) {
+ ForEachDirection direction, TNode<JSTypedArray> typed_array) {
VariableList list({&a_, &k_}, zone());
TNode<UintPtrT> start = UintPtrConstant(0);
@@ -208,12 +199,28 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
BuildFastLoop<UintPtrT>(
list, start, end,
[&](TNode<UintPtrT> index) {
- GotoIf(IsDetachedBuffer(array_buffer), detached);
- TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(typed_array);
- TNode<Numeric> value = LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, source_elements_kind_);
- k_ = index;
- a_ = processor(this, value, index);
+ TVARIABLE(Object, value);
+ Label detached(this, Label::kDeferred);
+ Label process(this);
+ GotoIf(IsDetachedBuffer(array_buffer), &detached);
+ {
+ TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(typed_array);
+ value = LoadFixedTypedArrayElementAsTagged(data_ptr, index,
+ source_elements_kind_);
+ Goto(&process);
+ }
+
+ BIND(&detached);
+ {
+ value = UndefinedConstant();
+ Goto(&process);
+ }
+
+ BIND(&process);
+ {
+ k_ = index;
+ a_ = processor(this, value.value(), index);
+ }
},
incr, advance_mode);
}
@@ -621,9 +628,9 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
Label is_smi(this), is_nonsmi(this), done(this);
// If no fromIndex was passed, default to 0.
- GotoIf(
- IntPtrLessThanOrEqual(args.GetLength(), IntPtrConstant(kFromIndexArg)),
- &done);
+ GotoIf(IntPtrLessThanOrEqual(args.GetLengthWithoutReceiver(),
+ IntPtrConstant(kFromIndexArg)),
+ &done);
TNode<Object> start_from = args.AtIndex(kFromIndexArg);
// Handle Smis and undefined here and everything else in runtime.
@@ -1774,11 +1781,13 @@ void ArrayBuiltinsAssembler::GenerateDispatchToArrayStub(
base::Optional<TNode<AllocationSite>> allocation_site) {
CodeStubArguments args(this, argc);
Label check_one_case(this), fallthrough(this);
- GotoIfNot(IntPtrEqual(args.GetLength(), IntPtrConstant(0)), &check_one_case);
+ GotoIfNot(IntPtrEqual(args.GetLengthWithoutReceiver(), IntPtrConstant(0)),
+ &check_one_case);
CreateArrayDispatchNoArgument(context, target, argc, mode, allocation_site);
BIND(&check_one_case);
- GotoIfNot(IntPtrEqual(args.GetLength(), IntPtrConstant(1)), &fallthrough);
+ GotoIfNot(IntPtrEqual(args.GetLengthWithoutReceiver(), IntPtrConstant(1)),
+ &fallthrough);
CreateArrayDispatchSingleArgument(context, target, argc, mode,
allocation_site);
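
The GetLength -> GetLengthWithoutReceiver switches in this file follow from the same argc convention. A standalone model (not the real CodeStubArguments class) of the distinction; kFromIndexArg and the member name are illustrative.

    #include <cassert>

    struct ArgsModel {
      int raw_argc;  // as passed in the JS calling convention, receiver included
      int GetLengthWithoutReceiver() const { return raw_argc - 1; }
    };

    int main() {
      constexpr int kFromIndexArg = 1;  // fromIndex is the second explicit argument
      ArgsModel args{/*raw_argc=*/2};   // e.g. arr.includes(x) with no fromIndex
      // "No fromIndex was passed": compare the receiver-less length, not the raw count.
      assert(args.GetLengthWithoutReceiver() <= kFromIndexArg);
      return 0;
    }
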
diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h
index 96833d9dea..1f169632bf 100644
--- a/deps/v8/src/builtins/builtins-array-gen.h
+++ b/deps/v8/src/builtins/builtins-array-gen.h
@@ -104,7 +104,7 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
private:
void VisitAllTypedArrayElements(TNode<JSArrayBuffer> array_buffer,
const CallResultProcessor& processor,
- Label* detached, ForEachDirection direction,
+ ForEachDirection direction,
TNode<JSTypedArray> typed_array);
TNode<Object> callbackfn_;
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 3e87252673..c5b4eb9041 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -84,9 +84,8 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
// Compute the number of registers and parameters.
TNode<SharedFunctionInfo> shared = LoadObjectField<SharedFunctionInfo>(
closure, JSFunction::kSharedFunctionInfoOffset);
- TNode<IntPtrT> formal_parameter_count =
- ChangeInt32ToIntPtr(LoadObjectField<Uint16T>(
- shared, SharedFunctionInfo::kFormalParameterCountOffset));
+ TNode<IntPtrT> formal_parameter_count = ChangeInt32ToIntPtr(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared));
TNode<BytecodeArray> bytecode_array =
LoadSharedFunctionInfoBytecodeArray(shared);
TNode<IntPtrT> frame_size = ChangeInt32ToIntPtr(LoadObjectField<Uint32T>(
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index 11dd73cd4a..f4af61b1a0 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -137,9 +137,9 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
{
Label has_sent_value(this), no_sent_value(this), merge(this);
ScopedExceptionHandler handler(this, &reject_promise, &var_exception);
- Branch(
- IntPtrGreaterThan(args->GetLength(), IntPtrConstant(kValueOrReasonArg)),
- &has_sent_value, &no_sent_value);
+ Branch(IntPtrGreaterThan(args->GetLengthWithoutReceiver(),
+ IntPtrConstant(kValueOrReasonArg)),
+ &has_sent_value, &no_sent_value);
BIND(&has_sent_value);
{
iter_result = Call(context, method, sync_iterator, sent_value);
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 54d2c74802..78003e71bd 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -274,7 +274,8 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
BIND(&if_done);
{
Label if_not_double(this), if_double(this);
- TNode<Int32T> args_count = Int32Constant(0); // args already on the stack
+ TNode<Int32T> args_count =
+ Int32Constant(i::JSParameterCount(0)); // args already on the stack
TNode<Int32T> length = var_length.value();
{
@@ -737,8 +738,8 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate(
TNode<RawPtrT> callback = LoadForeignForeignAddressPtr(foreign);
TNode<Object> call_data =
LoadObjectField<Object>(call_handler_info, CallHandlerInfo::kDataOffset);
- TailCallStub(CodeFactory::CallApiCallback(isolate()), context, callback, argc,
- call_data, holder);
+ TailCallStub(CodeFactory::CallApiCallback(isolate()), context, callback,
+ args.GetLengthWithoutReceiver(), call_data, holder);
}
TF_BUILTIN(CallFunctionTemplate_CheckAccess, CallOrConstructBuiltinsAssembler) {
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 0d677da854..23d7747491 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -589,10 +589,7 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::CreateShallowObjectLiteral(
BIND(&if_copy_elements);
CSA_ASSERT(this, Word32BinaryNot(
IsFixedCOWArrayMap(LoadMap(boilerplate_elements))));
- ExtractFixedArrayFlags flags;
- flags |= ExtractFixedArrayFlag::kAllFixedArrays;
- flags |= ExtractFixedArrayFlag::kNewSpaceAllocationOnly;
- flags |= ExtractFixedArrayFlag::kDontCopyCOW;
+ auto flags = ExtractFixedArrayFlag::kAllFixedArrays;
var_elements = CloneFixedArray(boilerplate_elements, flags);
Goto(&done);
BIND(&done);
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 3ae331f5d7..465de8e982 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -102,7 +102,6 @@ BUILTIN(DataViewConstructor) {
// 13. Set O's [[ByteOffset]] internal slot to offset.
Handle<JSDataView>::cast(result)->set_byte_offset(view_byte_offset);
- Handle<JSDataView>::cast(result)->AllocateExternalPointerEntries(isolate);
Handle<JSDataView>::cast(result)->set_data_pointer(
isolate,
static_cast<uint8_t*>(array_buffer->backing_store()) + view_byte_offset);
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 1de6357cf8..32c1f4b059 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -24,85 +24,6 @@ namespace internal {
namespace {
-// ES6 section 20.3.1.1 Time Values and Time Range
-const double kMinYear = -1000000.0;
-const double kMaxYear = -kMinYear;
-const double kMinMonth = -10000000.0;
-const double kMaxMonth = -kMinMonth;
-
-// 20.3.1.2 Day Number and Time within Day
-const double kMsPerDay = 86400000.0;
-
-// ES6 section 20.3.1.11 Hours, Minutes, Second, and Milliseconds
-const double kMsPerSecond = 1000.0;
-const double kMsPerMinute = 60000.0;
-const double kMsPerHour = 3600000.0;
-
-// ES6 section 20.3.1.14 MakeDate (day, time)
-double MakeDate(double day, double time) {
- if (std::isfinite(day) && std::isfinite(time)) {
- return time + day * kMsPerDay;
- }
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-// ES6 section 20.3.1.13 MakeDay (year, month, date)
-double MakeDay(double year, double month, double date) {
- if ((kMinYear <= year && year <= kMaxYear) &&
- (kMinMonth <= month && month <= kMaxMonth) && std::isfinite(date)) {
- int y = FastD2I(year);
- int m = FastD2I(month);
- y += m / 12;
- m %= 12;
- if (m < 0) {
- m += 12;
- y -= 1;
- }
- DCHECK_LE(0, m);
- DCHECK_LT(m, 12);
-
- // kYearDelta is an arbitrary number such that:
- // a) kYearDelta = -1 (mod 400)
- // b) year + kYearDelta > 0 for years in the range defined by
- // ECMA 262 - 15.9.1.1, i.e. upto 100,000,000 days on either side of
- // Jan 1 1970. This is required so that we don't run into integer
- // division of negative numbers.
- // c) there shouldn't be an overflow for 32-bit integers in the following
- // operations.
- static const int kYearDelta = 399999;
- static const int kBaseDay =
- 365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
- (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
- int day_from_year = 365 * (y + kYearDelta) + (y + kYearDelta) / 4 -
- (y + kYearDelta) / 100 + (y + kYearDelta) / 400 -
- kBaseDay;
- if ((y % 4 != 0) || (y % 100 == 0 && y % 400 != 0)) {
- static const int kDayFromMonth[] = {0, 31, 59, 90, 120, 151,
- 181, 212, 243, 273, 304, 334};
- day_from_year += kDayFromMonth[m];
- } else {
- static const int kDayFromMonth[] = {0, 31, 60, 91, 121, 152,
- 182, 213, 244, 274, 305, 335};
- day_from_year += kDayFromMonth[m];
- }
- return static_cast<double>(day_from_year - 1) + DoubleToInteger(date);
- }
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-// ES6 section 20.3.1.12 MakeTime (hour, min, sec, ms)
-double MakeTime(double hour, double min, double sec, double ms) {
- if (std::isfinite(hour) && std::isfinite(min) && std::isfinite(sec) &&
- std::isfinite(ms)) {
- double const h = DoubleToInteger(hour);
- double const m = DoubleToInteger(min);
- double const s = DoubleToInteger(sec);
- double const milli = DoubleToInteger(ms);
- return h * kMsPerHour + m * kMsPerMinute + s * kMsPerSecond + milli;
- }
- return std::numeric_limits<double>::quiet_NaN();
-}
-
const char* kShortWeekDays[] = {"Sun", "Mon", "Tue", "Wed",
"Thu", "Fri", "Sat"};
const char* kShortMonths[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
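
The helpers removed above (MakeDate, MakeDay, MakeTime) implement the ES2015 time-value arithmetic, and the kYearDelta comment explains the key trick: shifting the year keeps every intermediate positive so integer division rounds uniformly. Below is a self-contained C++ sketch of the same day-number calculation, copied in spirit from the deleted MakeDay; the integer-only signature and the main() self-check are illustrative additions, not V8 code.

#include <cstdio>

// Day number (days since Jan 1 1970) for a calendar date, mirroring the
// arithmetic of the deleted MakeDay. kYearDelta is chosen so that
// year + kYearDelta stays positive and is -1 (mod 400), keeping the
// leap-year divisions well behaved.
int MakeDaySketch(int year, int month, int date) {
  int y = year + month / 12;
  int m = month % 12;
  if (m < 0) {
    m += 12;
    y -= 1;
  }
  static const int kYearDelta = 399999;
  static const int kBaseDay =
      365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
      (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
  int day_from_year = 365 * (y + kYearDelta) + (y + kYearDelta) / 4 -
                      (y + kYearDelta) / 100 + (y + kYearDelta) / 400 -
                      kBaseDay;
  if ((y % 4 != 0) || (y % 100 == 0 && y % 400 != 0)) {
    // Non-leap year: cumulative days before each month.
    static const int kDayFromMonth[] = {0,   31,  59,  90,  120, 151,
                                        181, 212, 243, 273, 304, 334};
    day_from_year += kDayFromMonth[m];
  } else {
    // Leap year.
    static const int kDayFromMonth[] = {0,   31,  60,  91,  121, 152,
                                        182, 213, 244, 274, 305, 335};
    day_from_year += kDayFromMonth[m];
  }
  return day_from_year - 1 + date;
}

int main() {
  std::printf("%d\n", MakeDaySketch(1970, 0, 1));  // 0     (Jan 1 1970)
  std::printf("%d\n", MakeDaySketch(2000, 2, 1));  // 11017 (Mar 1 2000)
  return 0;
}
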
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 70eb349dab..0c89b0e45a 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -41,20 +41,28 @@ namespace internal {
TFC(EphemeronKeyBarrierIgnoreFP, WriteBarrier) \
\
/* TSAN support for stores in generated code.*/ \
- IF_TSAN(TFC, TSANRelaxedStore8IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore8SaveFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore16IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore16SaveFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore32IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore32SaveFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore64IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore64SaveFP, TSANRelaxedStore) \
+ IF_TSAN(TFC, TSANRelaxedStore8IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore8SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore16IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore16SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore32IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore32SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore64IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore64SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore8IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore8SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore16IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore16SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore32IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore32SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore64IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore64SaveFP, TSANStore) \
\
/* TSAN support for loads in generated code.*/ \
- IF_TSAN(TFC, TSANRelaxedLoad32IgnoreFP, TSANRelaxedLoad) \
- IF_TSAN(TFC, TSANRelaxedLoad32SaveFP, TSANRelaxedLoad) \
- IF_TSAN(TFC, TSANRelaxedLoad64IgnoreFP, TSANRelaxedLoad) \
- IF_TSAN(TFC, TSANRelaxedLoad64SaveFP, TSANRelaxedLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad32IgnoreFP, TSANLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad32SaveFP, TSANLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad64IgnoreFP, TSANLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad64SaveFP, TSANLoad) \
\
/* Adaptor for CPP builtin */ \
TFC(AdaptorWithBuiltinExitFrame, CppBuiltinAdaptor) \
@@ -302,7 +310,7 @@ namespace internal {
CPP(Illegal) \
CPP(StrictPoisonPillThrower) \
CPP(UnsupportedThrower) \
- TFJ(ReturnReceiver, 0, kReceiver) \
+ TFJ(ReturnReceiver, kJSArgcReceiverSlots, kReceiver) \
\
/* Array */ \
TFC(ArrayConstructor, JSTrampoline) \
@@ -373,13 +381,13 @@ namespace internal {
TFS(CloneFastJSArrayFillingHoles, kSource) \
TFS(ExtractFastJSArray, kSource, kBegin, kCount) \
/* ES6 #sec-array.prototype.entries */ \
- TFJ(ArrayPrototypeEntries, 0, kReceiver) \
+ TFJ(ArrayPrototypeEntries, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-array.prototype.keys */ \
- TFJ(ArrayPrototypeKeys, 0, kReceiver) \
+ TFJ(ArrayPrototypeKeys, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-array.prototype.values */ \
- TFJ(ArrayPrototypeValues, 0, kReceiver) \
+ TFJ(ArrayPrototypeValues, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-%arrayiteratorprototype%.next */ \
- TFJ(ArrayIteratorPrototypeNext, 0, kReceiver) \
+ TFJ(ArrayIteratorPrototypeNext, kJSArgcReceiverSlots, kReceiver) \
/* https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray */ \
TFS(FlattenIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth) \
TFS(FlatMapIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth, \
@@ -404,8 +412,10 @@ namespace internal {
TFC(AsyncFunctionLazyDeoptContinuation, AsyncFunctionStackParameter) \
TFS(AsyncFunctionAwaitCaught, kAsyncFunctionObject, kValue) \
TFS(AsyncFunctionAwaitUncaught, kAsyncFunctionObject, kValue) \
- TFJ(AsyncFunctionAwaitRejectClosure, 1, kReceiver, kSentError) \
- TFJ(AsyncFunctionAwaitResolveClosure, 1, kReceiver, kSentValue) \
+ TFJ(AsyncFunctionAwaitRejectClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kSentError) \
+ TFJ(AsyncFunctionAwaitResolveClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kSentValue) \
\
/* BigInt */ \
CPP(BigIntConstructor) \
@@ -471,45 +481,45 @@ namespace internal {
/* ES #sec-date-constructor */ \
CPP(DateConstructor) \
/* ES6 #sec-date.prototype.getdate */ \
- TFJ(DatePrototypeGetDate, 0, kReceiver) \
+ TFJ(DatePrototypeGetDate, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getday */ \
- TFJ(DatePrototypeGetDay, 0, kReceiver) \
+ TFJ(DatePrototypeGetDay, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getfullyear */ \
- TFJ(DatePrototypeGetFullYear, 0, kReceiver) \
+ TFJ(DatePrototypeGetFullYear, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.gethours */ \
- TFJ(DatePrototypeGetHours, 0, kReceiver) \
+ TFJ(DatePrototypeGetHours, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getmilliseconds */ \
- TFJ(DatePrototypeGetMilliseconds, 0, kReceiver) \
+ TFJ(DatePrototypeGetMilliseconds, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getminutes */ \
- TFJ(DatePrototypeGetMinutes, 0, kReceiver) \
+ TFJ(DatePrototypeGetMinutes, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getmonth */ \
- TFJ(DatePrototypeGetMonth, 0, kReceiver) \
+ TFJ(DatePrototypeGetMonth, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getseconds */ \
- TFJ(DatePrototypeGetSeconds, 0, kReceiver) \
+ TFJ(DatePrototypeGetSeconds, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.gettime */ \
- TFJ(DatePrototypeGetTime, 0, kReceiver) \
+ TFJ(DatePrototypeGetTime, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.gettimezoneoffset */ \
- TFJ(DatePrototypeGetTimezoneOffset, 0, kReceiver) \
+ TFJ(DatePrototypeGetTimezoneOffset, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcdate */ \
- TFJ(DatePrototypeGetUTCDate, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCDate, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcday */ \
- TFJ(DatePrototypeGetUTCDay, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCDay, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcfullyear */ \
- TFJ(DatePrototypeGetUTCFullYear, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCFullYear, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutchours */ \
- TFJ(DatePrototypeGetUTCHours, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCHours, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcmilliseconds */ \
- TFJ(DatePrototypeGetUTCMilliseconds, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCMilliseconds, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcminutes */ \
- TFJ(DatePrototypeGetUTCMinutes, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCMinutes, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcmonth */ \
- TFJ(DatePrototypeGetUTCMonth, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCMonth, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcseconds */ \
- TFJ(DatePrototypeGetUTCSeconds, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCSeconds, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.valueof */ \
- TFJ(DatePrototypeValueOf, 0, kReceiver) \
+ TFJ(DatePrototypeValueOf, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype-@@toprimitive */ \
- TFJ(DatePrototypeToPrimitive, 1, kReceiver, kHint) \
+ TFJ(DatePrototypeToPrimitive, kJSArgcReceiverSlots + 1, kReceiver, kHint) \
CPP(DatePrototypeGetYear) \
CPP(DatePrototypeSetYear) \
CPP(DateNow) \
@@ -578,9 +588,9 @@ namespace internal {
CPP(GlobalUnescape) \
CPP(GlobalEval) \
/* ES6 #sec-isfinite-number */ \
- TFJ(GlobalIsFinite, 1, kReceiver, kNumber) \
+ TFJ(GlobalIsFinite, kJSArgcReceiverSlots + 1, kReceiver, kNumber) \
/* ES6 #sec-isnan-number */ \
- TFJ(GlobalIsNaN, 1, kReceiver, kNumber) \
+ TFJ(GlobalIsNaN, kJSArgcReceiverSlots + 1, kReceiver, kNumber) \
\
/* JSON */ \
CPP(JsonParse) \
@@ -643,23 +653,23 @@ namespace internal {
/* Map */ \
TFS(FindOrderedHashMapEntry, kTable, kKey) \
TFJ(MapConstructor, kDontAdaptArgumentsSentinel) \
- TFJ(MapPrototypeSet, 2, kReceiver, kKey, kValue) \
- TFJ(MapPrototypeDelete, 1, kReceiver, kKey) \
- TFJ(MapPrototypeGet, 1, kReceiver, kKey) \
- TFJ(MapPrototypeHas, 1, kReceiver, kKey) \
+ TFJ(MapPrototypeSet, kJSArgcReceiverSlots + 2, kReceiver, kKey, kValue) \
+ TFJ(MapPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(MapPrototypeGet, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(MapPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
CPP(MapPrototypeClear) \
/* ES #sec-map.prototype.entries */ \
- TFJ(MapPrototypeEntries, 0, kReceiver) \
+ TFJ(MapPrototypeEntries, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-get-map.prototype.size */ \
- TFJ(MapPrototypeGetSize, 0, kReceiver) \
+ TFJ(MapPrototypeGetSize, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-map.prototype.forEach */ \
TFJ(MapPrototypeForEach, kDontAdaptArgumentsSentinel) \
/* ES #sec-map.prototype.keys */ \
- TFJ(MapPrototypeKeys, 0, kReceiver) \
+ TFJ(MapPrototypeKeys, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-map.prototype.values */ \
- TFJ(MapPrototypeValues, 0, kReceiver) \
+ TFJ(MapPrototypeValues, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-%mapiteratorprototype%.next */ \
- TFJ(MapIteratorPrototypeNext, 0, kReceiver) \
+ TFJ(MapIteratorPrototypeNext, kJSArgcReceiverSlots, kReceiver) \
TFS(MapIteratorToList, kSource) \
\
/* ES #sec-number-constructor */ \
@@ -731,28 +741,30 @@ namespace internal {
CPP(ObjectDefineProperties) \
CPP(ObjectDefineProperty) \
CPP(ObjectDefineSetter) \
- TFJ(ObjectEntries, 1, kReceiver, kObject) \
+ TFJ(ObjectEntries, kJSArgcReceiverSlots + 1, kReceiver, kObject) \
CPP(ObjectFreeze) \
TFJ(ObjectGetOwnPropertyDescriptor, kDontAdaptArgumentsSentinel) \
CPP(ObjectGetOwnPropertyDescriptors) \
- TFJ(ObjectGetOwnPropertyNames, 1, kReceiver, kObject) \
+ TFJ(ObjectGetOwnPropertyNames, kJSArgcReceiverSlots + 1, kReceiver, kObject) \
CPP(ObjectGetOwnPropertySymbols) \
- TFJ(ObjectHasOwn, 2, kReceiver, kObject, kKey) \
- TFJ(ObjectIs, 2, kReceiver, kLeft, kRight) \
+ TFJ(ObjectHasOwn, kJSArgcReceiverSlots + 2, kReceiver, kObject, kKey) \
+ TFJ(ObjectIs, kJSArgcReceiverSlots + 2, kReceiver, kLeft, kRight) \
CPP(ObjectIsFrozen) \
CPP(ObjectIsSealed) \
- TFJ(ObjectKeys, 1, kReceiver, kObject) \
+ TFJ(ObjectKeys, kJSArgcReceiverSlots + 1, kReceiver, kObject) \
CPP(ObjectLookupGetter) \
CPP(ObjectLookupSetter) \
/* ES6 #sec-object.prototype.hasownproperty */ \
- TFJ(ObjectPrototypeHasOwnProperty, 1, kReceiver, kKey) \
- TFJ(ObjectPrototypeIsPrototypeOf, 1, kReceiver, kValue) \
+ TFJ(ObjectPrototypeHasOwnProperty, kJSArgcReceiverSlots + 1, kReceiver, \
+ kKey) \
+ TFJ(ObjectPrototypeIsPrototypeOf, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
CPP(ObjectPrototypePropertyIsEnumerable) \
CPP(ObjectPrototypeGetProto) \
CPP(ObjectPrototypeSetProto) \
CPP(ObjectSeal) \
TFS(ObjectToString, kReceiver) \
- TFJ(ObjectValues, 1, kReceiver, kObject) \
+ TFJ(ObjectValues, kJSArgcReceiverSlots + 1, kReceiver, kObject) \
\
/* instanceof */ \
TFC(OrdinaryHasInstance, Compare) \
@@ -784,14 +796,16 @@ namespace internal {
CPP(RegExpCapture8Getter) \
CPP(RegExpCapture9Getter) \
/* ES #sec-regexp-pattern-flags */ \
- TFJ(RegExpConstructor, 2, kReceiver, kPattern, kFlags) \
+ TFJ(RegExpConstructor, kJSArgcReceiverSlots + 2, kReceiver, kPattern, \
+ kFlags) \
CPP(RegExpInputGetter) \
CPP(RegExpInputSetter) \
CPP(RegExpLastMatchGetter) \
CPP(RegExpLastParenGetter) \
CPP(RegExpLeftContextGetter) \
/* ES #sec-regexp.prototype.compile */ \
- TFJ(RegExpPrototypeCompile, 2, kReceiver, kPattern, kFlags) \
+ TFJ(RegExpPrototypeCompile, kJSArgcReceiverSlots + 2, kReceiver, kPattern, \
+ kFlags) \
CPP(RegExpPrototypeToString) \
CPP(RegExpRightContextGetter) \
\
@@ -803,20 +817,20 @@ namespace internal {
\
/* Set */ \
TFJ(SetConstructor, kDontAdaptArgumentsSentinel) \
- TFJ(SetPrototypeHas, 1, kReceiver, kKey) \
- TFJ(SetPrototypeAdd, 1, kReceiver, kKey) \
- TFJ(SetPrototypeDelete, 1, kReceiver, kKey) \
+ TFJ(SetPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(SetPrototypeAdd, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(SetPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
CPP(SetPrototypeClear) \
/* ES #sec-set.prototype.entries */ \
- TFJ(SetPrototypeEntries, 0, kReceiver) \
+ TFJ(SetPrototypeEntries, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-get-set.prototype.size */ \
- TFJ(SetPrototypeGetSize, 0, kReceiver) \
+ TFJ(SetPrototypeGetSize, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-set.prototype.foreach */ \
TFJ(SetPrototypeForEach, kDontAdaptArgumentsSentinel) \
/* ES #sec-set.prototype.values */ \
- TFJ(SetPrototypeValues, 0, kReceiver) \
+ TFJ(SetPrototypeValues, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-%setiteratorprototype%.next */ \
- TFJ(SetIteratorPrototypeNext, 0, kReceiver) \
+ TFJ(SetIteratorPrototypeNext, kJSArgcReceiverSlots, kReceiver) \
TFS(SetOrSetIteratorToList, kSource) \
\
/* SharedArrayBuffer */ \
@@ -825,16 +839,18 @@ namespace internal {
/* https://tc39.es/proposal-resizablearraybuffer/ */ \
CPP(SharedArrayBufferPrototypeGrow) \
\
- TFJ(AtomicsLoad, 2, kReceiver, kArray, kIndex) \
- TFJ(AtomicsStore, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsExchange, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsCompareExchange, 4, kReceiver, kArray, kIndex, kOldValue, \
- kNewValue) \
- TFJ(AtomicsAdd, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsSub, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsAnd, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsOr, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsXor, 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsLoad, kJSArgcReceiverSlots + 2, kReceiver, kArray, kIndex) \
+ TFJ(AtomicsStore, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, \
+ kValue) \
+ TFJ(AtomicsExchange, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, \
+ kValue) \
+ TFJ(AtomicsCompareExchange, kJSArgcReceiverSlots + 4, kReceiver, kArray, \
+ kIndex, kOldValue, kNewValue) \
+ TFJ(AtomicsAdd, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsSub, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsAnd, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsOr, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsXor, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
CPP(AtomicsNotify) \
CPP(AtomicsIsLockFree) \
CPP(AtomicsWait) \
@@ -848,11 +864,12 @@ namespace internal {
/* ES6 #sec-string.prototype.lastindexof */ \
CPP(StringPrototypeLastIndexOf) \
/* ES #sec-string.prototype.matchAll */ \
- TFJ(StringPrototypeMatchAll, 1, kReceiver, kRegexp) \
+ TFJ(StringPrototypeMatchAll, kJSArgcReceiverSlots + 1, kReceiver, kRegexp) \
/* ES6 #sec-string.prototype.localecompare */ \
CPP(StringPrototypeLocaleCompare) \
/* ES6 #sec-string.prototype.replace */ \
- TFJ(StringPrototypeReplace, 2, kReceiver, kSearch, kReplace) \
+ TFJ(StringPrototypeReplace, kJSArgcReceiverSlots + 2, kReceiver, kSearch, \
+ kReplace) \
/* ES6 #sec-string.prototype.split */ \
TFJ(StringPrototypeSplit, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.raw */ \
@@ -868,15 +885,15 @@ namespace internal {
\
/* TypedArray */ \
/* ES #sec-typedarray-constructors */ \
- TFJ(TypedArrayBaseConstructor, 0, kReceiver) \
+ TFJ(TypedArrayBaseConstructor, kJSArgcReceiverSlots, kReceiver) \
TFJ(TypedArrayConstructor, kDontAdaptArgumentsSentinel) \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 #sec-get-%typedarray%.prototype.bytelength */ \
- TFJ(TypedArrayPrototypeByteLength, 0, kReceiver) \
+ TFJ(TypedArrayPrototypeByteLength, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-get-%typedarray%.prototype.byteoffset */ \
- TFJ(TypedArrayPrototypeByteOffset, 0, kReceiver) \
+ TFJ(TypedArrayPrototypeByteOffset, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-get-%typedarray%.prototype.length */ \
- TFJ(TypedArrayPrototypeLength, 0, kReceiver) \
+ TFJ(TypedArrayPrototypeLength, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-%typedarray%.prototype.copywithin */ \
CPP(TypedArrayPrototypeCopyWithin) \
/* ES6 #sec-%typedarray%.prototype.fill */ \
@@ -890,7 +907,7 @@ namespace internal {
/* ES6 #sec-%typedarray%.prototype.reverse */ \
CPP(TypedArrayPrototypeReverse) \
/* ES6 #sec-get-%typedarray%.prototype-@@tostringtag */ \
- TFJ(TypedArrayPrototypeToStringTag, 0, kReceiver) \
+ TFJ(TypedArrayPrototypeToStringTag, kJSArgcReceiverSlots, kReceiver) \
/* ES6 %TypedArray%.prototype.map */ \
TFJ(TypedArrayPrototypeMap, kDontAdaptArgumentsSentinel) \
\
@@ -908,16 +925,16 @@ namespace internal {
/* WeakMap */ \
TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \
TFS(WeakMapLookupHashIndex, kTable, kKey) \
- TFJ(WeakMapGet, 1, kReceiver, kKey) \
- TFJ(WeakMapPrototypeHas, 1, kReceiver, kKey) \
- TFJ(WeakMapPrototypeSet, 2, kReceiver, kKey, kValue) \
- TFJ(WeakMapPrototypeDelete, 1, kReceiver, kKey) \
+ TFJ(WeakMapGet, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(WeakMapPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(WeakMapPrototypeSet, kJSArgcReceiverSlots + 2, kReceiver, kKey, kValue) \
+ TFJ(WeakMapPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
\
/* WeakSet */ \
TFJ(WeakSetConstructor, kDontAdaptArgumentsSentinel) \
- TFJ(WeakSetPrototypeHas, 1, kReceiver, kKey) \
- TFJ(WeakSetPrototypeAdd, 1, kReceiver, kValue) \
- TFJ(WeakSetPrototypeDelete, 1, kReceiver, kValue) \
+ TFJ(WeakSetPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(WeakSetPrototypeAdd, kJSArgcReceiverSlots + 1, kReceiver, kValue) \
+ TFJ(WeakSetPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kValue) \
\
/* WeakSet / WeakMap Helpers */ \
TFS(WeakCollectionDelete, kCollection, kKey) \
@@ -948,12 +965,18 @@ namespace internal {
/* specific to Async Generators. Internal / Not exposed to JS code. */ \
TFS(AsyncGeneratorAwaitCaught, kAsyncGeneratorObject, kValue) \
TFS(AsyncGeneratorAwaitUncaught, kAsyncGeneratorObject, kValue) \
- TFJ(AsyncGeneratorAwaitResolveClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorAwaitRejectClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorYieldResolveClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorReturnClosedResolveClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorReturnClosedRejectClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorReturnResolveClosure, 1, kReceiver, kValue) \
+ TFJ(AsyncGeneratorAwaitResolveClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
+ TFJ(AsyncGeneratorAwaitRejectClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
+ TFJ(AsyncGeneratorYieldResolveClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
+ TFJ(AsyncGeneratorReturnClosedResolveClosure, kJSArgcReceiverSlots + 1, \
+ kReceiver, kValue) \
+ TFJ(AsyncGeneratorReturnClosedRejectClosure, kJSArgcReceiverSlots + 1, \
+ kReceiver, kValue) \
+ TFJ(AsyncGeneratorReturnResolveClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
\
/* Async-from-Sync Iterator */ \
\
@@ -966,7 +989,7 @@ namespace internal {
/* #sec-%asyncfromsynciteratorprototype%.return */ \
TFJ(AsyncFromSyncIteratorPrototypeReturn, kDontAdaptArgumentsSentinel) \
/* #sec-async-iterator-value-unwrap-functions */ \
- TFJ(AsyncIteratorValueUnwrap, 1, kReceiver, kValue) \
+ TFJ(AsyncIteratorValueUnwrap, kJSArgcReceiverSlots + 1, kReceiver, kValue) \
\
/* CEntry */ \
ASM(CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit, Dummy) \
@@ -1053,6 +1076,8 @@ namespace internal {
CPP(DisplayNamesSupportedLocalesOf) \
/* ecma402 #sec-intl.getcanonicallocales */ \
CPP(IntlGetCanonicalLocales) \
+ /* ecma402 #sec-intl.supportedvaluesof */ \
+ CPP(IntlSupportedValuesOf) \
/* ecma402 #sec-intl-listformat-constructor */ \
CPP(ListFormatConstructor) \
/* ecma402 #sec-intl-list-format.prototype.format */ \
@@ -1156,7 +1181,7 @@ namespace internal {
/* ecma402 #sup-string.prototype.tolocaleuppercase */ \
CPP(StringPrototypeToLocaleUpperCase) \
/* ES #sec-string.prototype.tolowercase */ \
- TFJ(StringPrototypeToLowerCaseIntl, 0, kReceiver) \
+ TFJ(StringPrototypeToLowerCaseIntl, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-string.prototype.touppercase */ \
CPP(StringPrototypeToUpperCaseIntl) \
TFS(StringToLowerCaseIntl, kString) \
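
Throughout this list the hard-coded TFJ argument counts are rewritten in terms of kJSArgcReceiverSlots, so a single definition works whether or not the receiver is counted in argc. A minimal illustration of the arithmetic; the assumption that kJSArgcReceiverSlots is 1 when kJSArgcIncludesReceiver is set (and 0 otherwise) is mine, the real constant is defined elsewhere in V8.

// Illustration only; the real constants live in V8's globals.
constexpr bool kJSArgcIncludesReceiver = true;  // assumed build-time setting
constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

// TFJ(MapPrototypeSet, kJSArgcReceiverSlots + 2, kReceiver, kKey, kValue)
// declares two explicit parameters (kKey, kValue) plus the receiver slot:
constexpr int kMapPrototypeSetArgc = kJSArgcReceiverSlots + 2;
static_assert(kMapPrototypeSetArgc == (kJSArgcIncludesReceiver ? 3 : 2),
              "receiver slot folded into the declared argc");
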
diff --git a/deps/v8/src/builtins/builtins-descriptors.h b/deps/v8/src/builtins/builtins-descriptors.h
index c2eb44debe..12f7f58ec5 100644
--- a/deps/v8/src/builtins/builtins-descriptors.h
+++ b/deps/v8/src/builtins/builtins-descriptors.h
@@ -14,19 +14,20 @@ namespace v8 {
namespace internal {
// Define interface descriptors for builtins with JS linkage.
-#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
- struct Builtin_##Name##_InterfaceDescriptor { \
- enum ParameterIndices { \
- kJSTarget = compiler::CodeAssembler::kTargetParameterIndex, \
- ##__VA_ARGS__, \
- kJSNewTarget, \
- kJSActualArgumentsCount, \
- kContext, \
- kParameterCount, \
- }; \
- static_assert((Argc) == static_cast<uint16_t>(kParameterCount - 4), \
- "Inconsistent set of arguments"); \
- static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \
+#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
+ struct Builtin_##Name##_InterfaceDescriptor { \
+ enum ParameterIndices { \
+ kJSTarget = compiler::CodeAssembler::kTargetParameterIndex, \
+ ##__VA_ARGS__, \
+ kJSNewTarget, \
+ kJSActualArgumentsCount, \
+ kContext, \
+ kParameterCount, \
+ }; \
+ static_assert((Argc) == static_cast<uint16_t>(kParameterCount - 4 + \
+ kJSArgcReceiverSlots), \
+ "Inconsistent set of arguments"); \
+ static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \
};
// Define interface descriptors for builtins with StubCall linkage.
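
The updated static_assert folds the receiver slot into the parameter-count consistency check. A worked sketch (not the real descriptor machinery) for a builtin with a receiver and one explicit parameter, e.g. MapPrototypeGet:

// kJSTarget sits at index -1, so the receiver lands at 0 and kParameterCount
// ends up as "explicit parameters + 3" (new target, argc, context).
enum DemoParameterIndices {
  kJSTarget = -1,
  kReceiver,                // 0
  kKey,                     // 1
  kJSNewTarget,             // 2
  kJSActualArgumentsCount,  // 3
  kContext,                 // 4
  kParameterCount,          // 5
};
constexpr int kJSArgcReceiverSlots = 1;                  // assumed configuration
constexpr int kDeclaredArgc = kJSArgcReceiverSlots + 1;  // as written in the TFJ entry
static_assert(kDeclaredArgc == kParameterCount - 4 + kJSArgcReceiverSlots,
              "Inconsistent set of arguments");          // 2 == 5 - 4 + 1
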
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index eb557b1ca1..ff39350725 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -219,11 +219,10 @@ TF_BUILTIN(SuspendGeneratorBaseline, GeneratorBuiltinsAssembler) {
TNode<JSFunction> closure = LoadJSGeneratorObjectFunction(generator);
auto sfi = LoadJSFunctionSharedFunctionInfo(closure);
- TNode<IntPtrT> formal_parameter_count = Signed(
- ChangeUint32ToWord(LoadSharedFunctionInfoFormalParameterCount(sfi)));
- CSA_ASSERT(this, Word32BinaryNot(IntPtrEqual(
- formal_parameter_count,
- IntPtrConstant(kDontAdaptArgumentsSentinel))));
+ CSA_ASSERT(this,
+ Word32BinaryNot(IsSharedFunctionInfoDontAdaptArguments(sfi)));
+ TNode<IntPtrT> formal_parameter_count = Signed(ChangeUint32ToWord(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(sfi)));
TNode<FixedArray> parameters_and_registers =
LoadJSGeneratorObjectParametersAndRegisters(generator);
@@ -274,11 +273,10 @@ TF_BUILTIN(ResumeGeneratorBaseline, GeneratorBuiltinsAssembler) {
auto generator = Parameter<JSGeneratorObject>(Descriptor::kGeneratorObject);
TNode<JSFunction> closure = LoadJSGeneratorObjectFunction(generator);
auto sfi = LoadJSFunctionSharedFunctionInfo(closure);
- TNode<IntPtrT> formal_parameter_count = Signed(
- ChangeUint32ToWord(LoadSharedFunctionInfoFormalParameterCount(sfi)));
- CSA_ASSERT(this, Word32BinaryNot(IntPtrEqual(
- formal_parameter_count,
- IntPtrConstant(kDontAdaptArgumentsSentinel))));
+ CSA_ASSERT(this,
+ Word32BinaryNot(IsSharedFunctionInfoDontAdaptArguments(sfi)));
+ TNode<IntPtrT> formal_parameter_count = Signed(ChangeUint32ToWord(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(sfi)));
TNode<FixedArray> parameters_and_registers =
LoadJSGeneratorObjectParametersAndRegisters(generator);
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 49ad4b4e7c..03f9fb932a 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -439,10 +439,9 @@ class TSANRelaxedStoreCodeStubAssembler : public CodeStubAssembler {
void GenerateTSANRelaxedStore(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
- auto address =
- UncheckedParameter<IntPtrT>(TSANRelaxedStoreDescriptor::kAddress);
+ auto address = UncheckedParameter<IntPtrT>(TSANStoreDescriptor::kAddress);
TNode<IntPtrT> value = BitcastTaggedToWord(
- UncheckedParameter<Object>(TSANRelaxedStoreDescriptor::kValue));
+ UncheckedParameter<Object>(TSANStoreDescriptor::kValue));
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address),
@@ -483,6 +482,73 @@ TF_BUILTIN(TSANRelaxedStore64SaveFP, TSANRelaxedStoreCodeStubAssembler) {
GenerateTSANRelaxedStore(SaveFPRegsMode::kSave, kInt64Size);
}
+class TSANSeqCstStoreCodeStubAssembler : public CodeStubAssembler {
+ public:
+ explicit TSANSeqCstStoreCodeStubAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<ExternalReference> GetExternalReference(int size) {
+ if (size == kInt8Size) {
+ return ExternalConstant(
+ ExternalReference::tsan_seq_cst_store_function_8_bits());
+ } else if (size == kInt16Size) {
+ return ExternalConstant(
+ ExternalReference::tsan_seq_cst_store_function_16_bits());
+ } else if (size == kInt32Size) {
+ return ExternalConstant(
+ ExternalReference::tsan_seq_cst_store_function_32_bits());
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return ExternalConstant(
+ ExternalReference::tsan_seq_cst_store_function_64_bits());
+ }
+ }
+
+ void GenerateTSANSeqCstStore(SaveFPRegsMode fp_mode, int size) {
+ TNode<ExternalReference> function = GetExternalReference(size);
+ auto address = UncheckedParameter<IntPtrT>(TSANStoreDescriptor::kAddress);
+ TNode<IntPtrT> value = BitcastTaggedToWord(
+ UncheckedParameter<Object>(TSANStoreDescriptor::kValue));
+ CallCFunctionWithCallerSavedRegisters(
+ function, MachineType::Int32(), fp_mode,
+ std::make_pair(MachineType::IntPtr(), address),
+ std::make_pair(MachineType::IntPtr(), value));
+ Return(UndefinedConstant());
+ }
+};
+
+TF_BUILTIN(TSANSeqCstStore8IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt8Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore8SaveFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt8Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore16IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt16Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore16SaveFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt16Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore32IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt32Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore32SaveFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt32Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore64IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt64Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore64SaveFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt64Size);
+}
+
class TSANRelaxedLoadCodeStubAssembler : public CodeStubAssembler {
public:
explicit TSANRelaxedLoadCodeStubAssembler(compiler::CodeAssemblerState* state)
@@ -501,8 +567,7 @@ class TSANRelaxedLoadCodeStubAssembler : public CodeStubAssembler {
void GenerateTSANRelaxedLoad(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
- auto address =
- UncheckedParameter<IntPtrT>(TSANRelaxedLoadDescriptor::kAddress);
+ auto address = UncheckedParameter<IntPtrT>(TSANLoadDescriptor::kAddress);
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address));
@@ -888,21 +953,23 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
auto actual_argc =
UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ CodeStubArguments args(this, actual_argc);
- TVARIABLE(Int32T, pushed_argc, actual_argc);
+ TVARIABLE(Int32T, pushed_argc,
+ TruncateIntPtrToInt32(args.GetLengthWithReceiver()));
TNode<SharedFunctionInfo> shared = LoadJSFunctionSharedFunctionInfo(target);
- TNode<Int32T> formal_count =
- UncheckedCast<Int32T>(LoadSharedFunctionInfoFormalParameterCount(shared));
+ TNode<Int32T> formal_count = UncheckedCast<Int32T>(
+ LoadSharedFunctionInfoFormalParameterCountWithReceiver(shared));
// The number of arguments pushed is the maximum of actual arguments count
// and formal parameters count. Except when the formal parameters count is
// the sentinel.
Label check_argc(this), update_argc(this), done_argc(this);
- Branch(Word32Equal(formal_count, Int32Constant(kDontAdaptArgumentsSentinel)),
- &done_argc, &check_argc);
+ Branch(IsSharedFunctionInfoDontAdaptArguments(shared), &done_argc,
+ &check_argc);
BIND(&check_argc);
Branch(Int32GreaterThan(formal_count, pushed_argc.value()), &update_argc,
&done_argc);
@@ -915,7 +982,7 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
// including the receiver and the extra arguments.
TNode<Int32T> argc = Int32Add(
pushed_argc.value(),
- Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
+ Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithoutReceiver));
const bool builtin_exit_frame = true;
TNode<Code> code =
@@ -1053,9 +1120,7 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+#if ENABLE_SPARKPLUG
void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
EmitReturnBaseline(masm);
}
@@ -1241,17 +1306,17 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
GotoIf(TaggedIsSmi(maybe_result_or_smi_zero), &tailcall_to_function);
TNode<SharedFunctionInfo> shared = LoadJSFunctionSharedFunctionInfo(function);
- TNode<Int32T> parameter_count =
- UncheckedCast<Int32T>(LoadSharedFunctionInfoFormalParameterCount(shared));
+ TNode<Int32T> parameter_count = UncheckedCast<Int32T>(
+ LoadSharedFunctionInfoFormalParameterCountWithReceiver(shared));
// This builtin intercepts a call to {function}, where the number of arguments
// pushed is the maximum of actual arguments count and formal parameters
// count.
Label argc_lt_param_count(this), argc_ge_param_count(this);
- Branch(IntPtrLessThan(args.GetLength(), ChangeInt32ToIntPtr(parameter_count)),
+ Branch(IntPtrLessThan(args.GetLengthWithReceiver(),
+ ChangeInt32ToIntPtr(parameter_count)),
&argc_lt_param_count, &argc_ge_param_count);
BIND(&argc_lt_param_count);
- PopAndReturn(Int32Add(parameter_count, Int32Constant(1)),
- maybe_result_or_smi_zero);
+ PopAndReturn(parameter_count, maybe_result_or_smi_zero);
BIND(&argc_ge_param_count);
args.PopAndReturn(maybe_result_or_smi_zero);
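
AdaptorWithBuiltinExitFrame now derives the pushed argument count from CodeStubArguments and the receiver-inclusive formal parameter count, but the policy stated in the comment is unchanged. A plain C++ sketch of that policy; the function name and signature are illustrative, not V8 API.

#include <algorithm>

// How many arguments end up on the stack for a CPP builtin call: the larger
// of what the caller actually pushed and what the callee formally declares,
// both counted including the receiver, unless the callee opted out of
// argument adaptation.
int PushedArgc(int actual_with_receiver, int formal_with_receiver,
               bool dont_adapt_arguments) {
  if (dont_adapt_arguments) return actual_with_receiver;
  return std::max(actual_with_receiver, formal_with_receiver);
}
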
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index c3711898c3..cff87636cb 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -236,7 +236,7 @@ Handle<JSFunction> CreateBoundFunction(Isolate* isolate,
Handle<SharedFunctionInfo> info =
isolate->factory()->NewSharedFunctionInfoForBuiltin(
isolate->factory()->empty_string(), builtin, kNormalFunction);
- info->set_internal_formal_parameter_count(len);
+ info->set_internal_formal_parameter_count(JSParameterCount(len));
info->set_length(len);
return Factory::JSFunctionBuilder{isolate, info, context}
@@ -576,6 +576,13 @@ BUILTIN(IntlGetCanonicalLocales) {
Intl::GetCanonicalLocales(isolate, locales));
}
+BUILTIN(IntlSupportedValuesOf) {
+ HandleScope scope(isolate);
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+
+ RETURN_RESULT_OR_FAILURE(isolate, Intl::SupportedValuesOf(isolate, locales));
+}
+
BUILTIN(ListFormatConstructor) {
HandleScope scope(isolate);
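
CreateBoundFunction now wraps the JS-visible length in JSParameterCount before storing the internal formal parameter count. A sketch of what that helper is assumed to do; the actual definition lives elsewhere in V8 and may differ in detail.

constexpr int kJSArgcReceiverSlots = 1;  // assumed: internal counts include the receiver

// Convert a JS-visible parameter count (no receiver) into the internal
// formal parameter count used by SharedFunctionInfo.
constexpr int JSParameterCount(int param_count_without_receiver) {
  return param_count_without_receiver + kJSArgcReceiverSlots;
}
static_assert(JSParameterCount(2) == 3, "two named parameters plus the receiver");
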
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index 6ee50ac737..4fb5de7eb5 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -156,8 +156,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
TVARIABLE(Code, code);
// Check if we have baseline code.
- GotoIf(InstanceTypeEqual(sfi_data_type.value(), BASELINE_DATA_TYPE),
- &baseline);
+ GotoIf(InstanceTypeEqual(sfi_data_type.value(), CODET_TYPE), &baseline);
code = sfi_code;
Goto(&tailcall_code);
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 68112e5bff..558b582789 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -436,7 +436,9 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
Label done(this);
// 2. If only one argument was passed, return to.
- GotoIf(UintPtrLessThanOrEqual(args.GetLength(), IntPtrConstant(1)), &done);
+ GotoIf(UintPtrLessThanOrEqual(args.GetLengthWithoutReceiver(),
+ IntPtrConstant(1)),
+ &done);
// 3. Let sources be the List of argument values starting with the
// second argument.
@@ -1242,9 +1244,8 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
TNode<BytecodeArray> bytecode_array =
LoadSharedFunctionInfoBytecodeArray(shared);
- TNode<IntPtrT> formal_parameter_count =
- ChangeInt32ToIntPtr(LoadObjectField<Uint16T>(
- shared, SharedFunctionInfo::kFormalParameterCountOffset));
+ TNode<IntPtrT> formal_parameter_count = ChangeInt32ToIntPtr(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared));
TNode<IntPtrT> frame_size = ChangeInt32ToIntPtr(
LoadObjectField<Int32T>(bytecode_array, BytecodeArray::kFrameSizeOffset));
TNode<IntPtrT> size =
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 16304a56a5..9442b64d06 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -121,10 +121,10 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
TNode<Object> receiver = args.GetReceiver();
// 7. Let argArray be CreateArrayFromList(argumentsList).
- TNode<JSArray> array =
- EmitFastNewAllArguments(UncheckedCast<Context>(context),
- UncheckedCast<RawPtrT>(LoadFramePointer()),
- UncheckedCast<IntPtrT>(argc_ptr));
+ TNode<JSArray> array = EmitFastNewAllArguments(
+ UncheckedCast<Context>(context),
+ UncheckedCast<RawPtrT>(LoadFramePointer()),
+ UncheckedCast<IntPtrT>(args.GetLengthWithoutReceiver()));
// 8. Return Call(trap, handler, «target, thisArgument, argArray»).
TNode<Object> result = Call(context, trap, handler, target, receiver, array);
@@ -174,10 +174,10 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
CodeStubArguments args(this, argc_ptr);
// 7. Let argArray be CreateArrayFromList(argumentsList).
- TNode<JSArray> array =
- EmitFastNewAllArguments(UncheckedCast<Context>(context),
- UncheckedCast<RawPtrT>(LoadFramePointer()),
- UncheckedCast<IntPtrT>(argc_ptr));
+ TNode<JSArray> array = EmitFastNewAllArguments(
+ UncheckedCast<Context>(context),
+ UncheckedCast<RawPtrT>(LoadFramePointer()),
+ UncheckedCast<IntPtrT>(args.GetLengthWithoutReceiver()));
// 8. Let newObj be ? Call(trap, handler, « target, argArray, newTarget »).
TNode<Object> new_obj =
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 535188c567..6e4307b404 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -18,6 +18,7 @@
#include "src/objects/js-regexp-string-iterator.h"
#include "src/objects/js-regexp.h"
#include "src/objects/regexp-match-info.h"
+#include "src/regexp/regexp-flags.h"
namespace v8 {
namespace internal {
@@ -1041,23 +1042,16 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
CAST(LoadObjectField(CAST(regexp), JSRegExp::kFlagsOffset));
var_flags = SmiUntag(flags_smi);
-#define CASE_FOR_FLAG(FLAG) \
- do { \
- Label next(this); \
- GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
- var_length = Uint32Add(var_length.value(), Uint32Constant(1)); \
- Goto(&next); \
- BIND(&next); \
- } while (false)
+#define CASE_FOR_FLAG(Lower, Camel, ...) \
+ do { \
+ Label next(this); \
+ GotoIfNot(IsSetWord(var_flags.value(), JSRegExp::k##Camel), &next); \
+ var_length = Uint32Add(var_length.value(), Uint32Constant(1)); \
+ Goto(&next); \
+ BIND(&next); \
+ } while (false);
- CASE_FOR_FLAG(JSRegExp::kHasIndices);
- CASE_FOR_FLAG(JSRegExp::kGlobal);
- CASE_FOR_FLAG(JSRegExp::kIgnoreCase);
- CASE_FOR_FLAG(JSRegExp::kLinear);
- CASE_FOR_FLAG(JSRegExp::kMultiline);
- CASE_FOR_FLAG(JSRegExp::kDotAll);
- CASE_FOR_FLAG(JSRegExp::kUnicode);
- CASE_FOR_FLAG(JSRegExp::kSticky);
+ REGEXP_FLAG_LIST(CASE_FOR_FLAG)
#undef CASE_FOR_FLAG
} else {
DCHECK(!is_fastpath);
@@ -1123,26 +1117,19 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
TVARIABLE(IntPtrT, var_offset,
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-#define CASE_FOR_FLAG(FLAG, CHAR) \
- do { \
- Label next(this); \
- GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
- const TNode<Int32T> value = Int32Constant(CHAR); \
- StoreNoWriteBarrier(MachineRepresentation::kWord8, string, \
- var_offset.value(), value); \
- var_offset = IntPtrAdd(var_offset.value(), int_one); \
- Goto(&next); \
- BIND(&next); \
- } while (false)
-
- CASE_FOR_FLAG(JSRegExp::kHasIndices, 'd');
- CASE_FOR_FLAG(JSRegExp::kGlobal, 'g');
- CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i');
- CASE_FOR_FLAG(JSRegExp::kLinear, 'l');
- CASE_FOR_FLAG(JSRegExp::kMultiline, 'm');
- CASE_FOR_FLAG(JSRegExp::kDotAll, 's');
- CASE_FOR_FLAG(JSRegExp::kUnicode, 'u');
- CASE_FOR_FLAG(JSRegExp::kSticky, 'y');
+#define CASE_FOR_FLAG(Lower, Camel, LowerCamel, Char, ...) \
+ do { \
+ Label next(this); \
+ GotoIfNot(IsSetWord(var_flags.value(), JSRegExp::k##Camel), &next); \
+ const TNode<Int32T> value = Int32Constant(Char); \
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, string, \
+ var_offset.value(), value); \
+ var_offset = IntPtrAdd(var_offset.value(), int_one); \
+ Goto(&next); \
+ BIND(&next); \
+ } while (false);
+
+ REGEXP_FLAG_LIST(CASE_FOR_FLAG)
#undef CASE_FOR_FLAG
if (is_fastpath) {
@@ -1391,29 +1378,12 @@ TNode<BoolT> RegExpBuiltinsAssembler::SlowFlagGetter(TNode<Context> context,
switch (flag) {
case JSRegExp::kNone:
UNREACHABLE();
- case JSRegExp::kGlobal:
- name = isolate()->factory()->global_string();
- break;
- case JSRegExp::kIgnoreCase:
- name = isolate()->factory()->ignoreCase_string();
- break;
- case JSRegExp::kMultiline:
- name = isolate()->factory()->multiline_string();
- break;
- case JSRegExp::kDotAll:
- UNREACHABLE(); // Never called for dotAll.
- case JSRegExp::kSticky:
- name = isolate()->factory()->sticky_string();
- break;
- case JSRegExp::kUnicode:
- name = isolate()->factory()->unicode_string();
- break;
- case JSRegExp::kHasIndices:
- name = isolate()->factory()->has_indices_string();
- break;
- case JSRegExp::kLinear:
- name = isolate()->factory()->linear_string();
- break;
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ case JSRegExp::k##Camel: \
+ name = isolate()->factory()->LowerCamel##_string(); \
+ break;
+ REGEXP_FLAG_LIST(V)
+#undef V
}
TNode<Object> value = GetProperty(context, regexp, name);
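
The hand-written flag switches above are replaced by the REGEXP_FLAG_LIST X-macro from src/regexp/regexp-flags.h, whose rows (judging by the V(Lower, Camel, LowerCamel, Char, Bit) signatures used here) carry every spelling of a flag. A standalone sketch of the pattern with a reduced, illustrative flag list:

#include <cstdio>

// Each V(...) row carries every spelling of a flag; call sites pick the
// columns they need and ignore the rest via a trailing ellipsis if desired.
#define DEMO_REGEXP_FLAG_LIST(V)                 \
  V(global, Global, global, 'g', 0)              \
  V(ignore_case, IgnoreCase, ignoreCase, 'i', 1) \
  V(multiline, Multiline, multiline, 'm', 2)

enum DemoFlag {
#define V(Lower, Camel, LowerCamel, Char, Bit) k##Camel = 1 << Bit,
  DEMO_REGEXP_FLAG_LIST(V)
#undef V
};

void PrintFlagChars(int flags) {
#define V(Lower, Camel, LowerCamel, Char, Bit) \
  if (flags & k##Camel) std::putchar(Char);
  DEMO_REGEXP_FLAG_LIST(V)
#undef V
  std::putchar('\n');
}

int main() {
  PrintFlagChars(kGlobal | kMultiline);  // prints "gm"
  return 0;
}
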
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index fa536792ed..ff0b5d4722 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -204,26 +204,28 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromInt32(AtomicLoad<Int8T>(backing_store, index_word)));
+ Return(SmiFromInt32(AtomicLoad<Int8T>(AtomicMemoryOrder::kSeqCst,
+ backing_store, index_word)));
BIND(&u8);
- Return(SmiFromInt32(AtomicLoad<Uint8T>(backing_store, index_word)));
+ Return(SmiFromInt32(AtomicLoad<Uint8T>(AtomicMemoryOrder::kSeqCst,
+ backing_store, index_word)));
BIND(&i16);
- Return(
- SmiFromInt32(AtomicLoad<Int16T>(backing_store, WordShl(index_word, 1))));
+ Return(SmiFromInt32(AtomicLoad<Int16T>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 1))));
BIND(&u16);
- Return(
- SmiFromInt32(AtomicLoad<Uint16T>(backing_store, WordShl(index_word, 1))));
+ Return(SmiFromInt32(AtomicLoad<Uint16T>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 1))));
BIND(&i32);
- Return(ChangeInt32ToTagged(
- AtomicLoad<Int32T>(backing_store, WordShl(index_word, 2))));
+ Return(ChangeInt32ToTagged(AtomicLoad<Int32T>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));
BIND(&u32);
- Return(ChangeUint32ToTagged(
- AtomicLoad<Uint32T>(backing_store, WordShl(index_word, 2))));
+ Return(ChangeUint32ToTagged(AtomicLoad<Uint32T>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));
#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
BIND(&i64);
Goto(&u64);
@@ -235,12 +237,12 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
}
#else
BIND(&i64);
- Return(BigIntFromSigned64(
- AtomicLoad64<AtomicInt64>(backing_store, WordShl(index_word, 3))));
+ Return(BigIntFromSigned64(AtomicLoad64<AtomicInt64>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
BIND(&u64);
- Return(BigIntFromUnsigned64(
- AtomicLoad64<AtomicUint64>(backing_store, WordShl(index_word, 3))));
+ Return(BigIntFromUnsigned64(AtomicLoad64<AtomicUint64>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
#endif
// This shouldn't happen, we've already validated the type.
@@ -307,18 +309,18 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&u8);
- AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
- value_word32);
+ AtomicStore(MachineRepresentation::kWord8, AtomicMemoryOrder::kSeqCst,
+ backing_store, index_word, value_word32);
Return(value_integer);
BIND(&u16);
- AtomicStore(MachineRepresentation::kWord16, backing_store,
- WordShl(index_word, 1), value_word32);
+ AtomicStore(MachineRepresentation::kWord16, AtomicMemoryOrder::kSeqCst,
+ backing_store, WordShl(index_word, 1), value_word32);
Return(value_integer);
BIND(&u32);
- AtomicStore(MachineRepresentation::kWord32, backing_store,
- WordShl(index_word, 2), value_word32);
+ AtomicStore(MachineRepresentation::kWord32, AtomicMemoryOrder::kSeqCst,
+ backing_store, WordShl(index_word, 2), value_word32);
Return(value_integer);
BIND(&u64);
@@ -340,7 +342,8 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
TVARIABLE(UintPtrT, var_high);
BigIntToRawBytes(value_bigint, &var_low, &var_high);
TNode<UintPtrT> high = Is64() ? TNode<UintPtrT>() : var_high.value();
- AtomicStore64(backing_store, WordShl(index_word, 3), var_low.value(), high);
+ AtomicStore64(AtomicMemoryOrder::kSeqCst, backing_store,
+ WordShl(index_word, 3), var_low.value(), high);
Return(value_bigint);
#endif
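
AtomicLoad/AtomicStore in the CSA now take an explicit AtomicMemoryOrder, with the Atomics builtins requesting kSeqCst. The standard C++ analogue of the two orderings involved, using plain std::atomic (nothing V8-specific):

#include <atomic>
#include <cstdint>

std::atomic<int32_t> cell{0};

int32_t LoadSeqCst() {
  // What Atomics.load provides: a single total order over all seq_cst
  // operations on top of the usual acquire/release guarantees.
  return cell.load(std::memory_order_seq_cst);
}

void StoreSeqCst(int32_t value) {
  cell.store(value, std::memory_order_seq_cst);
}

int32_t LoadRelaxed() {
  // Relaxed accesses are atomic but impose no ordering on other memory
  // operations; these correspond to the "relaxed" loads and stores that
  // keep their existing call sites in this file.
  return cell.load(std::memory_order_relaxed);
}
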
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 61c1d8d387..0ce2fd0f17 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -792,12 +792,12 @@ TF_BUILTIN(StringFromCharCode, StringBuiltinsAssembler) {
CodeStubArguments arguments(this, argc);
TNode<Uint32T> unsigned_argc =
- Unsigned(TruncateIntPtrToInt32(arguments.GetLength()));
+ Unsigned(TruncateIntPtrToInt32(arguments.GetLengthWithoutReceiver()));
// Check if we have exactly one argument (plus the implicit receiver), i.e.
// if the parent frame is not an arguments adaptor frame.
Label if_oneargument(this), if_notoneargument(this);
- Branch(IntPtrEqual(arguments.GetLength(), IntPtrConstant(1)), &if_oneargument,
- &if_notoneargument);
+ Branch(IntPtrEqual(arguments.GetLengthWithoutReceiver(), IntPtrConstant(1)),
+ &if_oneargument, &if_notoneargument);
BIND(&if_oneargument);
{
diff --git a/deps/v8/src/builtins/builtins-string.tq b/deps/v8/src/builtins/builtins-string.tq
index 4111155fd2..663ba86cdb 100644
--- a/deps/v8/src/builtins/builtins-string.tq
+++ b/deps/v8/src/builtins/builtins-string.tq
@@ -32,7 +32,7 @@ transitioning macro ToStringImpl(context: Context, o: JSAny): String {
ThrowTypeError(MessageTemplate::kSymbolToString);
}
case (JSAny): {
- return runtime::ToString(context, o);
+ return runtime::ToString(context, result);
}
}
}
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index a76650d052..0fd0c32340 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -65,9 +65,8 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset,
byte_length);
- InitializeExternalPointerField(buffer, JSArrayBuffer::kBackingStoreOffset,
- PointerConstant(nullptr),
- kArrayBufferBackingStoreTag);
+ StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBackingStoreOffset,
+ PointerConstant(nullptr));
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kExtensionOffset,
IntPtrConstant(0));
for (int offset = JSArrayBuffer::kHeaderSize;
@@ -404,12 +403,6 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
BIND(&next);
}
-void TypedArrayBuiltinsAssembler::AllocateJSTypedArrayExternalPointerEntry(
- TNode<JSTypedArray> holder) {
- InitializeExternalPointerField(
- holder, IntPtrConstant(JSTypedArray::kExternalPointerOffset));
-}
-
void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
TNode<JSTypedArray> holder, TNode<ByteArray> base, TNode<UintPtrT> offset) {
offset = UintPtrAdd(UintPtrConstant(ByteArray::kHeaderSize - kHeapObjectTag),
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index bb8a15ef02..a309f67286 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -83,7 +83,6 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
void DispatchTypedArrayByElementsKind(
TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
- void AllocateJSTypedArrayExternalPointerEntry(TNode<JSTypedArray> holder);
void SetJSTypedArrayOnHeapDataPtr(TNode<JSTypedArray> holder,
TNode<ByteArray> base,
TNode<UintPtrT> offset);
diff --git a/deps/v8/src/builtins/console.tq b/deps/v8/src/builtins/console.tq
index c0daa19b6d..483b5422d8 100644
--- a/deps/v8/src/builtins/console.tq
+++ b/deps/v8/src/builtins/console.tq
@@ -12,7 +12,8 @@ javascript builtin FastConsoleAssert(
if (ToBoolean(arguments[0])) {
return Undefined;
} else {
- tail ConsoleAssert(target, newTarget, Convert<int32>(arguments.length));
+ tail ConsoleAssert(
+ target, newTarget, Convert<int32>(arguments.actual_count));
}
}
}
diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq
index c1c73d0060..2849b782c8 100644
--- a/deps/v8/src/builtins/convert.tq
+++ b/deps/v8/src/builtins/convert.tq
@@ -180,6 +180,9 @@ Convert<uint8, intptr>(i: intptr): uint8 {
Convert<int8, intptr>(i: intptr): int8 {
return %RawDownCast<int8>(TruncateIntPtrToInt32(i) << 24 >> 24);
}
+Convert<uint16, uint32>(i: uint32): uint16 {
+ return %RawDownCast<uint16>(i & 0xFFFF);
+}
Convert<int32, uint8>(i: uint8): int32 {
return Signed(Convert<uint32>(i));
}
diff --git a/deps/v8/src/builtins/frame-arguments.tq b/deps/v8/src/builtins/frame-arguments.tq
index 5f25c97dc3..9dd26e2327 100644
--- a/deps/v8/src/builtins/frame-arguments.tq
+++ b/deps/v8/src/builtins/frame-arguments.tq
@@ -6,7 +6,11 @@
struct Arguments {
const frame: FrameWithArguments;
const base: RawPtr;
+ // length is the number of arguments without the receiver.
const length: intptr;
+ // actual_count is the actual number of arguments on the stack (depending on
+ // kJSArgcIncludesReceiver, it may or may not include the receiver).
+ const actual_count: intptr;
}
extern operator '[]' macro GetArgumentValue(Arguments, intptr): JSAny;
@@ -45,8 +49,8 @@ macro GetFrameWithArgumentsInfo(implicit context: Context)():
const f: JSFunction = frame.function;
const shared: SharedFunctionInfo = f.shared_function_info;
- const formalParameterCount: bint =
- Convert<bint>(Convert<int32>(shared.formal_parameter_count));
+ const formalParameterCount: bint = Convert<bint>(Convert<int32>(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared)));
// TODO(victorgomes): When removing the v8_disable_arguments_adaptor flag,
// FrameWithArgumentsInfo can be simplified, since the frame field already
// contains the argument count.
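
The Arguments struct now distinguishes length (arguments excluding the receiver) from actual_count (the raw slot count, which includes the receiver only when kJSArgcIncludesReceiver is set). A tiny C++ mirror of that relationship, assuming a one-slot receiver:

constexpr bool kJSArgcIncludesReceiver = true;  // assumed build-time setting
constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

struct ArgumentsSketch {
  int actual_count;  // raw argument-slot count as stored in the frame
  constexpr int length() const {  // JS-visible count, receiver excluded
    return actual_count - kJSArgcReceiverSlots;
  }
};
constexpr ArgumentsSketch kThreeSlots{3};
static_assert(kThreeSlots.length() == 3 - kJSArgcReceiverSlots,
              "length strips the receiver slot when argc includes it");
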
diff --git a/deps/v8/src/builtins/frames.tq b/deps/v8/src/builtins/frames.tq
index 03336bd464..3e959a094f 100644
--- a/deps/v8/src/builtins/frames.tq
+++ b/deps/v8/src/builtins/frames.tq
@@ -66,8 +66,12 @@ operator '.caller' macro LoadCallerFromFrame(f: Frame): Frame {
const kStandardFrameArgCOffset: constexpr int31
generates 'StandardFrameConstants::kArgCOffset';
+const kJSArgcReceiverSlots: constexpr int31
+ generates 'kJSArgcReceiverSlots';
+
operator '.argument_count' macro LoadArgCFromFrame(f: Frame): intptr {
- return LoadIntptrFromFrame(f, kStandardFrameArgCOffset);
+ return LoadIntptrFromFrame(f, kStandardFrameArgCOffset) -
+ kJSArgcReceiverSlots;
}
type ContextOrFrameType = Context|FrameType;
diff --git a/deps/v8/src/builtins/function.tq b/deps/v8/src/builtins/function.tq
index e6ce7edfef..682fdce4ba 100644
--- a/deps/v8/src/builtins/function.tq
+++ b/deps/v8/src/builtins/function.tq
@@ -38,7 +38,7 @@ transitioning javascript builtin
FastFunctionPrototypeBind(
js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny,
target: JSFunction)(...arguments): JSAny {
- const argc: intptr = arguments.length;
+ const argc: intptr = arguments.actual_count;
try {
typeswitch (receiver) {
case (fn: JSFunction|JSBoundFunction): {
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 7a8875fee9..63aba94fe9 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -78,6 +78,36 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
+enum class ArgumentsElementType {
+ kRaw, // Push arguments as they are.
+ kHandle // Dereference arguments before pushing.
+};
+
+void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
+ Register scratch1, Register scratch2,
+ ArgumentsElementType element_type) {
+ DCHECK(!AreAliased(array, argc, scratch1, scratch2));
+ Register counter = scratch1;
+ Label loop, entry;
+ if (kJSArgcIncludesReceiver) {
+ __ lea(counter, Operand(argc, -kJSArgcReceiverSlots));
+ } else {
+ __ mov(counter, argc);
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ Operand value(array, counter, times_system_pointer_size, 0);
+ if (element_type == ArgumentsElementType::kHandle) {
+ DCHECK(scratch2 != no_reg);
+ __ mov(scratch2, value);
+ value = Operand(scratch2, 0);
+ }
+ __ Push(value);
+ __ bind(&entry);
+ __ dec(counter);
+ __ j(greater_equal, &loop, Label::kNear);
+}
+
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax: number of arguments
@@ -109,7 +139,10 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ lea(esi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
// Copy arguments to the expression stack.
- __ PushArray(esi, eax, ecx);
+ // esi: Pointer to start of arguments.
+ // eax: Number of arguments.
+ Generate_PushArguments(masm, esi, eax, ecx, no_reg,
+ ArgumentsElementType::kRaw);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
@@ -130,7 +163,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ ret(0);
__ bind(&stack_overflow);
@@ -237,7 +272,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// InvokeFunction.
// Copy arguments to the expression stack.
- __ PushArray(edi, eax, ecx);
+ // edi: Pointer to start of arguments.
+ // eax: Number of arguments.
+ Generate_PushArguments(masm, edi, eax, ecx, no_reg,
+ ArgumentsElementType::kRaw);
// Push implicit receiver.
__ movd(ecx, xmm0);
@@ -282,7 +320,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ ret(0);
// Otherwise we do a smi check and fall through to check if the return value
@@ -497,17 +537,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&enough_stack_space);
- // Copy arguments to the stack in a loop.
- Label loop, entry;
- __ Move(ecx, eax);
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- // Push the parameter from argv.
- __ mov(scratch2, Operand(scratch1, ecx, times_system_pointer_size, 0));
- __ push(Operand(scratch2, 0)); // dereference handle
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
+ // Copy arguments to the stack.
+ // scratch1 (edx): Pointer to start of arguments.
+ // eax: Number of arguments.
+ Generate_PushArguments(masm, scratch1, eax, ecx, scratch2,
+ ArgumentsElementType::kHandle);
// Load the previous frame pointer to access C arguments
__ mov(scratch2, Operand(ebp, 0));
@@ -562,6 +596,16 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
__ bind(&done);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ mov(scratch, FieldOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ cmp(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
+ __ Assert(equal, AbortReason::kExpectedBaselineData);
+}
+
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
@@ -570,8 +614,16 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ LoadMap(scratch1, sfi_data);
- __ CmpInstanceType(scratch1, BASELINE_DATA_TYPE);
- __ j(equal, is_baseline);
+ __ CmpInstanceType(scratch1, CODET_TYPE);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ j(not_equal, &not_baseline);
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ j(equal, is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ j(equal, is_baseline);
+ }
__ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
__ j(not_equal, &done, Label::kNear);
@@ -641,6 +693,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ movzx_w(ecx, FieldOperand(
ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ dec(ecx);
+ }
__ mov(ebx,
FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -677,7 +732,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&is_baseline);
__ Pop(eax);
- __ CmpObjectType(ecx, BASELINE_DATA_TYPE, ecx);
+ __ CmpObjectType(ecx, CODET_TYPE, ecx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
__ bind(&ok);
@@ -757,7 +812,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ mov(actual_params_size, Operand(ebp, StandardFrameConstants::kArgCOffset));
__ lea(actual_params_size,
Operand(actual_params_size, times_system_pointer_size,
- kSystemPointerSize));
+ kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -1008,7 +1063,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// stack left to right.
//
// The live registers are:
-// o eax: actual argument count (not including the receiver)
+// o eax: actual argument count
// o edi: the JS function object being called
// o edx: the incoming new target or generator object
// o esi: our context
@@ -1257,7 +1312,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the baseline code into the closure.
__ movd(ecx, xmm2);
- __ mov(ecx, FieldOperand(ecx, BaselineData::kBaselineCodeOffset));
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ push(edx); // Spill.
__ push(ecx);
@@ -1303,7 +1357,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- ecx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
@@ -1321,19 +1375,22 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Add a stack check before pushing the arguments.
__ StackOverflowCheck(eax, scratch, &stack_overflow, true);
-
__ movd(xmm0, eax); // Spill number of arguments.
// Compute the expected number of arguments.
- __ mov(scratch, eax);
+ int argc_modification = kJSArgcIncludesReceiver ? 0 : 1;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ argc_modification -= 1;
+ }
+ if (argc_modification != 0) {
+ __ lea(scratch, Operand(eax, argc_modification));
+ } else {
+ __ mov(scratch, eax);
+ }
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(eax);
- if (receiver_mode != ConvertReceiverMode::kNullOrUndefined) {
- __ add(scratch, Immediate(1)); // Add one for receiver.
- }
-
// Find the address of the last argument.
__ shl(scratch, kSystemPointerSizeLog2);
__ neg(scratch);
@@ -1385,9 +1442,10 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
Label* stack_overflow) {
// We have to move return address and the temporary registers above it
// before we can copy arguments onto the stack. To achieve this:
- // Step 1: Increment the stack pointer by num_args + 1 (for receiver).
- // Step 2: Move the return address and values around it to the top of stack.
- // Step 3: Copy the arguments into the correct locations.
+  // Step 1: Increment the stack pointer by num_args + 1 for the receiver (if
+  //         it is not already included in argc).
+  // Step 2: Move the return address and values around it to the top of stack.
+  // Step 3: Copy the arguments into the correct locations.
// current stack =====> required stack layout
// | | | return addr | (2) <-- esp (1)
// | | | addtl. slot |
@@ -1402,8 +1460,10 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// Step 1 - Update the stack pointer.
+ constexpr int receiver_offset =
+ kJSArgcIncludesReceiver ? 0 : kSystemPointerSize;
__ lea(scratch1,
- Operand(num_args, times_system_pointer_size, kSystemPointerSize));
+ Operand(num_args, times_system_pointer_size, receiver_offset));
__ AllocateStackSpace(scratch1);
// Step 2 move return_address and slots around it to the correct locations.
@@ -1412,7 +1472,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// extra slot for receiver, so no extra checks are required to avoid copy.
for (int i = 0; i < num_slots_to_move + 1; i++) {
__ mov(scratch1, Operand(esp, num_args, times_system_pointer_size,
- (i + 1) * kSystemPointerSize));
+ i * kSystemPointerSize + receiver_offset));
__ mov(Operand(esp, i * kSystemPointerSize), scratch1);
}
@@ -1434,7 +1494,11 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
__ bind(&loop_check);
__ inc(scratch1);
__ cmp(scratch1, eax);
- __ j(less_equal, &loop_header, Label::kNear);
+ if (kJSArgcIncludesReceiver) {
+ __ j(less, &loop_header, Label::kNear);
+ } else {
+ __ j(less_equal, &loop_header, Label::kNear);
+ }
}
} // anonymous namespace
@@ -1443,7 +1507,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- ecx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order
// as they are to be pushed onto the stack.
@@ -1832,7 +1896,8 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// the LAZY deopt point. eax contains the arguments count, the return value
// from LAZY is always the last argument.
__ movd(Operand(esp, eax, times_system_pointer_size,
- BuiltinContinuationFrameConstants::kFixedFrameSize),
+ BuiltinContinuationFrameConstants::kFixedFrameSize -
+ (kJSArgcIncludesReceiver ? kSystemPointerSize : 0)),
xmm0);
}
__ mov(
@@ -1894,23 +1959,29 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Label no_arg_array, no_this_arg;
StackArgumentsAccessor args(eax);
// Spill receiver to allow the usage of edi as a scratch register.
- __ movd(xmm0, args[0]);
+ __ movd(xmm0, args.GetReceiverOperand());
__ LoadRoot(edx, RootIndex::kUndefinedValue);
__ mov(edi, edx);
- __ test(eax, eax);
- __ j(zero, &no_this_arg, Label::kNear);
+ if (kJSArgcIncludesReceiver) {
+ __ cmp(eax, Immediate(JSParameterCount(0)));
+ __ j(equal, &no_this_arg, Label::kNear);
+ } else {
+ __ test(eax, eax);
+ __ j(zero, &no_this_arg, Label::kNear);
+ }
{
__ mov(edi, args[1]);
- __ cmp(eax, Immediate(1));
+ __ cmp(eax, Immediate(JSParameterCount(1)));
__ j(equal, &no_arg_array, Label::kNear);
__ mov(edx, args[2]);
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
- __ DropArgumentsAndPushNewReceiver(eax, edi, ecx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ eax, edi, ecx, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// Restore receiver to edi.
__ movd(edi, xmm0);
@@ -1940,7 +2011,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments to the receiver.
__ bind(&no_arguments);
{
- __ Move(eax, 0);
+ __ Move(eax, JSParameterCount(0));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
@@ -1954,7 +2025,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// ...
// esp[8 * n] : Argument n-1
// esp[8 * (n + 1)] : Argument n
- // eax contains the number of arguments, n, not counting the receiver.
+ // eax contains the number of arguments, n.
// 1. Get the callable to call (passed as receiver) from the stack.
{
@@ -1969,8 +2040,13 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Make sure we have at least one argument.
{
Label done;
- __ test(eax, eax);
- __ j(not_zero, &done, Label::kNear);
+ if (kJSArgcIncludesReceiver) {
+ __ cmp(eax, Immediate(JSParameterCount(0)));
+ __ j(greater, &done, Label::kNear);
+ } else {
+ __ test(eax, eax);
+ __ j(not_zero, &done, Label::kNear);
+ }
__ PushRoot(RootIndex::kUndefinedValue);
__ inc(eax);
__ bind(&done);
@@ -2004,12 +2080,12 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(edi, RootIndex::kUndefinedValue);
__ mov(edx, edi);
__ mov(ecx, edi);
- __ cmp(eax, Immediate(1));
+ __ cmp(eax, Immediate(JSParameterCount(1)));
__ j(below, &done, Label::kNear);
__ mov(edi, args[1]); // target
__ j(equal, &done, Label::kNear);
__ mov(ecx, args[2]); // thisArgument
- __ cmp(eax, Immediate(3));
+ __ cmp(eax, Immediate(JSParameterCount(3)));
__ j(below, &done, Label::kNear);
__ mov(edx, args[3]); // argumentsList
__ bind(&done);
@@ -2017,9 +2093,10 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// Spill argumentsList to use edx as a scratch register.
__ movd(xmm0, edx);
- __ DropArgumentsAndPushNewReceiver(eax, ecx, edx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ eax, ecx, edx, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// Restore argumentsList.
__ movd(edx, xmm0);
@@ -2061,13 +2138,13 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadRoot(edi, RootIndex::kUndefinedValue);
__ mov(edx, edi);
__ mov(ecx, edi);
- __ cmp(eax, Immediate(1));
+ __ cmp(eax, Immediate(JSParameterCount(1)));
__ j(below, &done, Label::kNear);
__ mov(edi, args[1]); // target
__ mov(edx, edi);
__ j(equal, &done, Label::kNear);
__ mov(ecx, args[2]); // argumentsList
- __ cmp(eax, Immediate(3));
+ __ cmp(eax, Immediate(JSParameterCount(3)));
__ j(below, &done, Label::kNear);
__ mov(edx, args[3]); // new.target
__ bind(&done);
@@ -2078,7 +2155,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ DropArgumentsAndPushNewReceiver(
eax, masm->RootAsOperand(RootIndex::kUndefinedValue), ecx,
TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// Restore argumentsList.
__ movd(ecx, xmm0);
@@ -2105,6 +2183,59 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+namespace {
+
+// Allocate new stack space for |count| arguments and shift all existing
+// arguments already on the stack. |pointer_to_new_space_out| points to the
+// first free slot on the stack to copy additional arguments to and
+// |argc_in_out| is updated to include |count|.
+void Generate_AllocateSpaceAndShiftExistingArguments(
+ MacroAssembler* masm, Register count, Register argc_in_out,
+ Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
+ DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
+ scratch2));
+ // Use pointer_to_new_space_out as scratch until we set it to the correct
+ // value at the end.
+ Register old_esp = pointer_to_new_space_out;
+ Register new_space = scratch1;
+ __ mov(old_esp, esp);
+
+ __ lea(new_space, Operand(count, times_system_pointer_size, 0));
+ __ AllocateStackSpace(new_space);
+
+ if (!kJSArgcIncludesReceiver) {
+ __ inc(argc_in_out);
+ }
+ Register current = scratch1;
+ Register value = scratch2;
+
+ Label loop, entry;
+ __ mov(current, 0);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(value, Operand(old_esp, current, times_system_pointer_size, 0));
+ __ mov(Operand(esp, current, times_system_pointer_size, 0), value);
+ __ inc(current);
+ __ bind(&entry);
+ __ cmp(current, argc_in_out);
+ __ j(less_equal, &loop, Label::kNear);
+
+ // Point to the next free slot above the shifted arguments (argc + 1 slot for
+ // the return address).
+ __ lea(
+ pointer_to_new_space_out,
+ Operand(esp, argc_in_out, times_system_pointer_size, kSystemPointerSize));
+ // Update the total number of arguments.
+ if (kJSArgcIncludesReceiver) {
+ __ add(argc_in_out, count);
+ } else {
+ // Also subtract the receiver again.
+ __ lea(argc_in_out, Operand(argc_in_out, count, times_1, -1));
+ }
+}
+
+} // namespace
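The helper above rearranges the stack rather than pushing into a gap. As a rough, standalone sketch of the same idea (not part of the patch; the vector-based layout and names are illustrative assumptions, with index 0 standing in for the top of the stack):

    // C++ model of Generate_AllocateSpaceAndShiftExistingArguments: grow the
    // stack by |count| slots, slide the return address, receiver and existing
    // arguments down into the new space, and report where the additional
    // arguments should be copied to.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct ShiftResult {
      size_t first_free_slot;  // index of the first slot for additional args
      size_t new_argc;         // updated count (same convention as the input)
    };

    ShiftResult AllocateSpaceAndShiftExistingArguments(
        std::vector<intptr_t>& stack, size_t count, size_t argc,
        bool argc_includes_receiver) {
      // Slots currently occupied: return address + receiver + arguments.
      size_t occupied =
          1 /* return address */ + (argc_includes_receiver ? argc : argc + 1);
      // "Allocate" |count| new slots at the top of the stack.
      stack.insert(stack.begin(), count, 0);
      // Slide the previously occupied slots up to the new top.
      for (size_t i = 0; i < occupied; ++i) {
        stack[i] = stack[i + count];
      }
      // The first free slot sits directly above the block we just moved.
      return {occupied, argc + count};
    }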
+
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
@@ -2112,17 +2243,15 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- edi : target
// -- esi : context for the Call / Construct builtin
- // -- eax : number of parameters on the stack (not including the receiver)
+ // -- eax : number of parameters on the stack
  // -- ecx : len (number of elements to push from args)
- // -- ecx : new.target (checked to be constructor or undefined)
+ // -- edx : new.target (checked to be constructor or undefined)
// -- esp[4] : arguments list (a FixedArray)
// -- esp[0] : return address.
// -----------------------------------
- // We need to preserve eax, edi, esi and ebx.
- __ movd(xmm0, edx);
- __ movd(xmm1, edi);
- __ movd(xmm2, eax);
+ __ movd(xmm0, edx); // Spill new.target.
+ __ movd(xmm1, edi); // Spill target.
__ movd(xmm3, esi); // Spill the context.
const Register kArgumentsList = esi;
@@ -2157,32 +2286,15 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ StackOverflowCheck(kArgumentsLength, edx, &stack_overflow);
__ movd(xmm4, kArgumentsList); // Spill the arguments list.
-
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register src = edx, current = edi, tmp = esi;
- // Update stack pointer.
- __ mov(src, esp);
- __ lea(tmp, Operand(kArgumentsLength, times_system_pointer_size, 0));
- __ AllocateStackSpace(tmp);
- // Include return address and receiver.
- __ add(eax, Immediate(2));
- __ mov(current, Immediate(0));
- __ jmp(&check);
- // Loop.
- __ bind(&copy);
- __ mov(tmp, Operand(src, current, times_system_pointer_size, 0));
- __ mov(Operand(esp, current, times_system_pointer_size, 0), tmp);
- __ inc(current);
- __ bind(&check);
- __ cmp(current, eax);
- __ j(less, &copy);
- __ lea(edx, Operand(esp, eax, times_system_pointer_size, 0));
- }
-
+ // kArgumentsLength (ecx): Number of arguments to make room for.
+ // eax: Number of arguments already on the stack.
+ // edx: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, kArgumentsLength, eax,
+ edx, edi, esi);
__ movd(kArgumentsList, xmm4); // Recover arguments list.
+ __ movd(xmm2, eax); // Spill argument count.
// Push additional arguments onto the stack.
{
@@ -2207,12 +2319,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Restore eax, edi and edx.
__ movd(esi, xmm3); // Restore the context.
- __ movd(eax, xmm2);
- __ movd(edi, xmm1);
- __ movd(edx, xmm0);
-
- // Compute the actual parameter count.
- __ add(eax, kArgumentsLength);
+ __ movd(eax, xmm2); // Restore argument count.
+ __ movd(edi, xmm1); // Restore target.
+ __ movd(edx, xmm0); // Restore new.target.
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
@@ -2227,7 +2336,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edi : the target to call (can be any Object)
// -- esi : context for the Call / Construct builtin
// -- edx : the new target (for [[Construct]] calls)
@@ -2261,12 +2370,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ mov(edx, Operand(ebp, StandardFrameConstants::kArgCOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ dec(edx);
+ }
__ sub(edx, ecx);
__ j(less_equal, &stack_done);
{
// ----------- S t a t e -------------
- // -- eax : the number of arguments already in the stack (not including the
- // receiver)
+ // -- eax : the number of arguments already in the stack
// -- ecx : start index (to support rest parameters)
// -- edx : number of arguments to copy, i.e. arguments count - start index
// -- edi : the target to call (can be any Object)
@@ -2284,31 +2395,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register src = esi, current = edi;
- // Update stack pointer.
- __ mov(src, esp);
- __ lea(scratch, Operand(edx, times_system_pointer_size, 0));
- __ AllocateStackSpace(scratch);
- // Include return address and receiver.
- __ add(eax, Immediate(2));
- __ Move(current, 0);
- __ jmp(&check);
- // Loop.
- __ bind(&copy);
- __ mov(scratch, Operand(src, current, times_system_pointer_size, 0));
- __ mov(Operand(esp, current, times_system_pointer_size, 0), scratch);
- __ inc(current);
- __ bind(&check);
- __ cmp(current, eax);
- __ j(less, &copy);
- __ lea(esi, Operand(esp, eax, times_system_pointer_size, 0));
- }
-
- // Update total number of arguments.
- __ sub(eax, Immediate(2));
- __ add(eax, edx);
+ // edx: Number of arguments to make room for.
+ // eax: Number of arguments already on the stack.
+ // esi: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, edx, eax, esi, ebx,
+ edi);
// Point to the first argument to copy (skipping receiver).
__ lea(ecx, Operand(ecx, times_system_pointer_size,
@@ -2350,7 +2441,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edi : the function to call (checked to be a JSFunction)
// -----------------------------------
StackArgumentsAccessor args(eax);
@@ -2376,7 +2467,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ j(not_zero, &done_convert);
{
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the shared function info.
// -- edi : the function to call (checked to be a JSFunction)
// -- esi : the function context.
@@ -2434,7 +2525,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bind(&done_convert);
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the shared function info.
// -- edi : the function to call (checked to be a JSFunction)
// -- esi : the function context.
@@ -2456,7 +2547,7 @@ namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : new.target (only in case of [[Construct]])
// -- edi : target (checked to be a JSBoundFunction)
// -----------------------------------
@@ -2471,7 +2562,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ j(zero, &no_bound_arguments);
{
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- xmm0 : new.target (only in case of [[Construct]])
// -- edi : target (checked to be a JSBoundFunction)
// -- ecx : the [[BoundArguments]] (implemented as FixedArray)
@@ -2539,7 +2630,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edi : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(edi);
@@ -2561,7 +2652,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edi : the target to call (can be any Object).
// -----------------------------------
StackArgumentsAccessor args(eax);
@@ -2618,7 +2709,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the new target (checked to be a constructor)
// -- edi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
@@ -2650,7 +2741,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the new target (checked to be a constructor)
// -- edi : the constructor to call (checked to be a JSBoundFunction)
// -----------------------------------
@@ -2677,7 +2768,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- edi : the constructor to call (can be any Object)
@@ -2768,7 +2859,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
}
// Load deoptimization data from the code object.
- __ mov(ecx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ mov(ecx, Operand(eax, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
__ mov(ecx, Operand(ecx, FixedArray::OffsetOfElementAt(
@@ -4125,8 +4217,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CmpObjectType(code_obj, BASELINE_DATA_TYPE,
- kInterpreterBytecodeOffsetRegister);
+ __ CmpObjectType(code_obj, CODET_TYPE, kInterpreterBytecodeOffsetRegister);
__ j(equal, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -4139,13 +4230,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
- __ CmpObjectType(code_obj, BASELINE_DATA_TYPE,
- kInterpreterBytecodeOffsetRegister);
+ __ CmpObjectType(code_obj, CODET_TYPE, kInterpreterBytecodeOffsetRegister);
__ Assert(equal, AbortReason::kExpectedBaselineData);
}
- // Load baseline code from baseline data.
- __ mov(code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, ecx);
+ }
// Load the feedback vector.
Register feedback_vector = ecx;
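Many of the ia32 hunks above toggle on kJSArgcIncludesReceiver and compare eax against JSParameterCount(n). A standalone sketch of the relationship those checks appear to assume (illustrative only; the names below are local stand-ins, not the V8 definitions):

    // Model of the two argument-count conventions: the count in eax either
    // covers only the explicit JS parameters, or it also covers the implicit
    // receiver slot.
    #include <cassert>

    constexpr bool kArgcIncludesReceiver = true;  // assumed build-time setting
    constexpr int kReceiverSlots = kArgcIncludesReceiver ? 1 : 0;

    // What a check like cmp(eax, Immediate(JSParameterCount(n))) is read as
    // here: n explicit parameters plus the receiver slot, if it is counted.
    constexpr int ParameterCount(int explicit_params) {
      return explicit_params + kReceiverSlots;
    }

    int main() {
      // f(a, b) arrives with a count of 3 when the receiver is included,
      // and with a count of 2 when it is not.
      assert(ParameterCount(2) == (kArgcIncludesReceiver ? 3 : 2));
      return 0;
    }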
diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc
new file mode 100644
index 0000000000..714353fc96
--- /dev/null
+++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc
@@ -0,0 +1,3755 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/api/api-arguments.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/logging/counters.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
+#include "src/heap/heap-inl.h"
+#include "src/objects/cell.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
+#include "src/objects/js-generator.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/smi.h"
+#include "src/runtime/runtime.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-linkage.h"
+#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
+ __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
+ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
+ RelocInfo::CODE_TARGET);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual argument count
+ // -- a1 : target function (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the target function, the new target and the actual
+ // argument count.
+ // Push function as parameter to the runtime call.
+ __ SmiTag(kJavaScriptCallArgCountRegister);
+ __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
+ kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
+
+ __ CallRuntime(function_id, 1);
+ __ LoadCodeObjectEntry(a2, a0);
+ // Restore target function, new target and actual argument count.
+ __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
+ kJavaScriptCallArgCountRegister);
+ __ SmiUntag(kJavaScriptCallArgCountRegister);
+ }
+
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Jump(a2);
+}
+
+namespace {
+
+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- a3 : new target
+ // -- cp : context
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(a0);
+ __ Push(cp, a0);
+ __ SmiUntag(a0);
+
+ // Set up pointer to last argument (skip receiver).
+ __ Add_d(
+ t2, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
+ // Copy arguments and receiver to the expression stack.
+ __ PushArray(t2, a0, t3, t0);
+ // The receiver for the builtin/api call.
+ __ PushRoot(RootIndex::kTheHoleValue);
+
+ // Call the function.
+ // a0: number of arguments (untagged)
+ // a1: constructor function
+ // a3: new target
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
+
+ // Restore context from the frame.
+ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ // Restore smi-tagged arguments count from the frame.
+ __ Ld_d(t3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
+ }
+
+ // Remove caller arguments from the stack and return.
+ __ SmiScale(t3, t3, kPointerSizeLog2);
+ __ Add_d(sp, sp, t3);
+ __ Add_d(sp, sp, kPointerSize);
+ __ Ret();
+}
+
+} // namespace
+
+// The construct stub for ES5 constructor functions and ES6 class constructors.
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0: number of arguments (untagged)
+ // -- a1: constructor function
+ // -- a3: new target
+ // -- cp: context
+ // -- ra: return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+ __ EnterFrame(StackFrame::CONSTRUCT);
+
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(a0);
+ __ Push(cp, a0, a1);
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ Push(a3);
+
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize]: new target
+ // -- sp[1*kPointerSize]: padding
+ // -- a1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+
+ __ Ld_d(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_wu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
+ __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
+
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, t2,
+ t3);
+ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
+ __ Branch(&post_instantiation_deopt_entry);
+
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(a0, RootIndex::kTheHoleValue);
+
+ // ----------- S t a t e -------------
+ // -- a0: receiver
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(a3);
+
+ // Push the allocated receiver to the stack.
+ __ Push(a0);
+
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+  // receiver. The second copy is pushed after the arguments; we keep it in a6
+  // since a0 will hold the call's return value.
+ __ mov(a6, a0);
+
+ // Set up pointer to last argument.
+ __ Add_d(
+ t2, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
+
+ // ----------- S t a t e -------------
+  // -- a3: new target
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
+ // -----------------------------------
+
+ // Restore constructor function and argument count.
+ __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ Ld_d(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(a0);
+
+ Label stack_overflow;
+ __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
+
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments in its
+ // correct position (including any undefined), instead of delaying this to
+ // InvokeFunction.
+
+ // Copy arguments and receiver to the expression stack.
+ __ PushArray(t2, a0, t0, t1);
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+  // receiver. The second copy is pushed after the arguments.
+ __ Push(a6);
+
+ // Call the function.
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
+
+ // ----------- S t a t e -------------
+  // -- a0: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, leave_and_return, check_receiver;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfNotRoot(a0, RootIndex::kUndefinedValue, &check_receiver);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize));
+ __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw);
+
+ __ bind(&leave_and_return);
+ // Restore smi-tagged arguments count from the frame.
+ __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
+ __ LeaveFrame(StackFrame::CONSTRUCT);
+
+ // Remove caller arguments from the stack and return.
+ __ SmiScale(a4, a1, kPointerSizeLog2);
+ __ Add_d(sp, sp, a4);
+ __ Add_d(sp, sp, kPointerSize);
+ __ Ret();
+
+ __ bind(&check_receiver);
+ __ JumpIfSmi(a0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(a0, t2, t2);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ Branch(&leave_and_return, greater_equal, t2,
+ Operand(FIRST_JS_RECEIVER_TYPE));
+ __ Branch(&use_receiver);
+
+ __ bind(&do_throw);
+ // Restore the context from the frame.
+ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ __ break_(0xCC);
+
+ __ bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ break_(0xCC);
+}
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSBuiltinsConstructStubHelper(masm);
+}
+
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ Ld_d(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+ Operand(static_cast<int>(CodeKind::BASELINE)));
+}
+
+// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
+// the more general dispatch.
+static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1,
+ Label* is_baseline) {
+ Label done;
+
+ __ GetObjectType(sfi_data, scratch1, scratch1);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ Branch(is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+ }
+ __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
+ __ Ld_d(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the value to pass to the generator
+ // -- a1 : the JSGeneratorObject to resume
+ // -- ra : return address
+ // -----------------------------------
+ // Store input value into generator object.
+ __ St_d(a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
+ __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0,
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
+ // Check that a1 is still valid, RecordWrite might have clobbered it.
+ __ AssertGeneratorObject(a1);
+
+ // Load suspended function and context.
+ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Ld_d(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
+
+ // Flood function if we are stepping.
+ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+ Label stepping_prepared;
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ li(a5, debug_hook);
+ __ Ld_b(a5, MemOperand(a5, 0));
+ __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
+
+ // Flood function if we need to continue stepping in the suspended generator.
+ ExternalReference debug_suspended_generator =
+ ExternalReference::debug_suspended_generator_address(masm->isolate());
+ __ li(a5, debug_suspended_generator);
+ __ Ld_d(a5, MemOperand(a5, 0));
+ __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
+ __ bind(&stepping_prepared);
+
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label stack_overflow;
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
+
+ // ----------- S t a t e -------------
+ // -- a1 : the JSGeneratorObject to resume
+ // -- a4 : generator function
+ // -- cp : generator context
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_hu(
+ a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Ld_d(t1, FieldMemOperand(
+ a1, JSGeneratorObject::kParametersAndRegistersOffset));
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ Sub_d(a3, a3, Operand(1));
+ __ Branch(&done_loop, lt, a3, Operand(zero_reg));
+ __ Alsl_d(kScratchReg, a3, t1, kPointerSizeLog2, t7);
+ __ Ld_d(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
+ __ Push(kScratchReg);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ // Push receiver.
+ __ Ld_d(kScratchReg,
+ FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+ __ Push(kScratchReg);
+ }
+
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ Label is_baseline;
+ __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_d(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, t5, &is_baseline);
+ __ GetObjectType(a3, a3, a3);
+ __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
+ Operand(BYTECODE_ARRAY_TYPE));
+ __ bind(&is_baseline);
+ }
+
+ // Resume (Ignition/TurboFan) generator object.
+ {
+ __ Ld_d(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_hu(a0, FieldMemOperand(
+ a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ Move(a3, a1);
+ __ Move(a1, a4);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ JumpCodeObject(a2);
+ }
+
+ __ bind(&prepare_step_in_if_stepping);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a4);
+ // Push hole as receiver since we do not use it for stepping.
+ __ PushRoot(RootIndex::kTheHoleValue);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
+ __ Pop(a1);
+ }
+ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Branch(&stepping_prepared);
+
+ __ bind(&prepare_step_in_suspended_generator);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+ __ Pop(a1);
+ }
+ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Branch(&stepping_prepared);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ break_(0xCC); // This should be unreachable.
+ }
+}
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+// Clobbers scratch1 and scratch2; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+ Register scratch1, Register scratch2) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
+  // Make scratch1 the space we have left. The stack might already be
+  // overflowed here, which will cause scratch1 to become negative.
+ __ sub_d(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ __ slli_d(scratch2, argc, kPointerSizeLog2);
+ __ Branch(&okay, gt, scratch1, Operand(scratch2)); // Signed comparison.
+
+ // Out of stack space.
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+
+ __ bind(&okay);
+}
+
+namespace {
+
+// Called with the native C calling convention. The corresponding function
+// signature is either:
+//
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, Address new_target, Address target,
+// Address receiver, intptr_t argc, Address** args)>;
+// or
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, MicrotaskQueue* microtask_queue)>;
+void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
+ Builtin entry_trampoline) {
+ Label invoke, handler_entry, exit;
+
+ {
+ NoRootArrayScope no_root_array(masm);
+
+ // Registers:
+ // either
+ // a0: root register value
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a0: root register value
+ // a1: microtask_queue
+
+ // Save callee saved registers on the stack.
+ __ MultiPush(kCalleeSaved | ra.bit());
+
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(kCalleeSavedFPU);
+ // Set up the reserved register for 0.0.
+ __ Move(kDoubleRegZero, 0.0);
+
+ // Initialize the root register.
+ // C calling convention. The first argument is passed in a0.
+ __ mov(kRootRegister, a0);
+ }
+
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+
+ // We build an EntryFrame.
+ __ li(s1, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ li(s2, Operand(StackFrame::TypeToMarker(type)));
+ __ li(s3, Operand(StackFrame::TypeToMarker(type)));
+ ExternalReference c_entry_fp = ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate());
+ __ li(s5, c_entry_fp);
+ __ Ld_d(s4, MemOperand(s5, 0));
+ __ Push(s1, s2, s3, s4);
+
+ // Clear c_entry_fp, now we've pushed its previous value to the stack.
+ // If the c_entry_fp is not already zero and we don't clear it, the
+ // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+ // frames on top.
+ __ St_d(zero_reg, MemOperand(s5, 0));
+
+ // Set up frame pointer for the frame to be pushed.
+ __ addi_d(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Registers:
+ // either
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a1: microtask_queue
+ //
+ // Stack:
+ // caller fp |
+ // function slot | entry frame
+ // context slot |
+ // bad fp (0xFF...F) |
+ // callee saved registers + ra
+ // [ O32: 4 args slots]
+ // args
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp = ExternalReference::Create(
+ IsolateAddressId::kJSEntrySPAddress, masm->isolate());
+ __ li(s1, js_entry_sp);
+ __ Ld_d(s2, MemOperand(s1, 0));
+ __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg));
+ __ St_d(fp, MemOperand(s1, 0));
+ __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ Label cont;
+ __ b(&cont);
+ __ nop(); // Branch delay slot nop.
+ __ bind(&non_outermost_js);
+ __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
+ __ bind(&cont);
+ __ Push(s3);
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+
+ // Store the current pc as the handler offset. It's used later to create the
+ // handler table.
+ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
+
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushStackHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ li(s1, ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
+ __ St_d(a0,
+ MemOperand(s1, 0)); // We come back from 'invoke'. result is in a0.
+ __ LoadRoot(a0, RootIndex::kException);
+ __ b(&exit); // b exposes branch delay slot.
+ __ nop(); // Branch delay slot nop.
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushStackHandler();
+ // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the jmp(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+ //
+ // Registers:
+ // either
+ // a0: root register value
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a0: root register value
+ // a1: microtask_queue
+ //
+ // Stack:
+ // handler frame
+ // entry frame
+ // callee saved registers + ra
+ // [ O32: 4 args slots]
+ // args
+ //
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return.
+
+ Handle<Code> trampoline_code =
+ masm->isolate()->builtins()->code_handle(entry_trampoline);
+ __ Call(trampoline_code, RelocInfo::CODE_TARGET);
+
+ // Unlink this frame from the handler chain.
+ __ PopStackHandler();
+
+ __ bind(&exit); // a0 holds result
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ Pop(a5);
+ __ Branch(&non_outermost_js_2, ne, a5,
+ Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ li(a5, js_entry_sp);
+ __ St_d(zero_reg, MemOperand(a5, 0));
+ __ bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ Pop(a5);
+ __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ St_d(a5, MemOperand(a4, 0));
+
+ // Reset the stack to the callee saved registers.
+ __ addi_d(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Restore callee-saved fpu registers.
+ __ MultiPopFPU(kCalleeSavedFPU);
+
+ // Restore callee saved registers from the stack.
+ __ MultiPop(kCalleeSaved | ra.bit());
+ // Return.
+ __ Jump(ra);
+}
+
+} // namespace
+
+void Builtins::Generate_JSEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
+}
+
+void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
+ Builtin::kJSConstructEntryTrampoline);
+}
+
+void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtin::kRunMicrotasksTrampoline);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // ----------- S t a t e -------------
+ // -- a1: new.target
+ // -- a2: function
+ // -- a3: receiver_pointer
+ // -- a4: argc
+ // -- a5: argv
+ // -----------------------------------
+
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Setup the context (we need to use the caller context from the isolate).
+ ExternalReference context_address = ExternalReference::Create(
+ IsolateAddressId::kContextAddress, masm->isolate());
+ __ li(cp, context_address);
+ __ Ld_d(cp, MemOperand(cp, 0));
+
+ // Push the function and the receiver onto the stack.
+ __ Push(a2);
+
+ // Check if we have enough stack space to push all arguments.
+ __ addi_d(a6, a4, 1);
+ Generate_CheckStackOverflow(masm, a6, a0, s2);
+
+ // Copy arguments to the stack in a loop.
+ // a4: argc
+ // a5: argv, i.e. points to first arg
+ Label loop, entry;
+ __ Alsl_d(s1, a4, a5, kPointerSizeLog2, t7);
+ __ b(&entry);
+ // s1 points past last arg.
+ __ bind(&loop);
+ __ addi_d(s1, s1, -kPointerSize);
+ __ Ld_d(s2, MemOperand(s1, 0)); // Read next parameter.
+ __ Ld_d(s2, MemOperand(s2, 0)); // Dereference handle.
+ __ Push(s2); // Push parameter.
+ __ bind(&entry);
+ __ Branch(&loop, ne, a5, Operand(s1));
+
+    // Push the receiver.
+ __ Push(a3);
+
+ // a0: argc
+ // a1: function
+ // a3: new.target
+ __ mov(a3, a1);
+ __ mov(a1, a2);
+ __ mov(a0, a4);
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(a4, RootIndex::kUndefinedValue);
+ __ mov(a5, a4);
+ __ mov(s1, a4);
+ __ mov(s2, a4);
+ __ mov(s3, a4);
+ __ mov(s4, a4);
+ __ mov(s5, a4);
+ // s6 holds the root address. Do not clobber.
+ // s7 is cp. Do not init.
+
+ // Invoke the code.
+ Handle<Code> builtin = is_construct
+ ? BUILTIN_CODE(masm->isolate(), Construct)
+ : masm->isolate()->builtins()->Call();
+ __ Call(builtin, RelocInfo::CODE_TARGET);
+
+ // Leave internal frame.
+ }
+ __ Jump(ra);
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
+ // a1: microtask_queue
+ __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
+ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
+}
+
+static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
+ Register optimized_code,
+ Register closure) {
+ DCHECK(!AreAliased(optimized_code, closure));
+ // Store code entry in the closure.
+ __ St_d(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
+ __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register params_size = scratch1;
+
+ // Get the size of the formal parameters + receiver (in bytes).
+ __ Ld_d(params_size,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Ld_w(params_size,
+ FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+ Register actual_params_size = scratch2;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Ld_d(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ slli_d(actual_params_size, actual_params_size, kPointerSizeLog2);
+ __ Add_d(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ __ slt(t2, params_size, actual_params_size);
+ __ Movn(params_size, actual_params_size, t2);
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::INTERPRETED);
+
+ // Drop receiver + arguments.
+ __ Add_d(sp, sp, params_size);
+}
+
+// Tail-call |function_id| if |actual_marker| == |expected_marker|
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register optimized_code_entry) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual argument count
+ // -- a3 : new target (preserved for callee if needed, and caller)
+ // -- a1 : target function (preserved for callee if needed, and caller)
+ // -----------------------------------
+ DCHECK(!AreAliased(optimized_code_entry, a1, a3));
+
+ Register closure = a1;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ __ Ld_d(a6, FieldMemOperand(optimized_code_entry,
+ Code::kCodeDataContainerOffset));
+ __ Ld_w(a6, FieldMemOperand(a6, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(a6, a6, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&heal_optimized_code_slot, ne, a6, Operand(zero_reg));
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
+
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ LoadCodeObjectEntry(a2, optimized_code_entry);
+ __ Jump(a2);
+
+ // Optimized code slot contains deoptimized code or code is cleared and
+ // optimized code marker isn't updated. Evict the code, update the marker
+ // and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
+}
+
+static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
+ Register optimization_marker) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual argument count
+ // -- a3 : new target (preserved for callee if needed, and caller)
+ // -- a1 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -----------------------------------
+ DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
+
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+}
+
+// Advance the current bytecode offset. This simulates what all bytecode
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop, instead just
+// re-executing the JumpLoop to jump to the correct bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Register scratch2, Register scratch3,
+ Label* if_return) {
+ Register bytecode_size_table = scratch1;
+
+ // The bytecode offset value will be increased by one in wide and extra wide
+ // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
+  // will restore the original bytecode offset. In order to simplify the code,
+  // we keep a backup of it.
+ Register original_bytecode_offset = scratch3;
+ DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
+ bytecode_size_table, original_bytecode_offset));
+ __ Move(original_bytecode_offset, bytecode_offset);
+ __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
+
+ // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
+ Label process_bytecode, extra_wide;
+ STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
+ STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
+ STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
+ STATIC_ASSERT(3 ==
+ static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
+ __ Branch(&process_bytecode, hi, bytecode, Operand(3));
+ __ And(scratch2, bytecode, Operand(1));
+ __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
+
+ // Load the next bytecode and update table to the wide scaled table.
+ __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
+ __ Add_d(scratch2, bytecode_array, bytecode_offset);
+ __ Ld_bu(bytecode, MemOperand(scratch2, 0));
+ __ Add_d(bytecode_size_table, bytecode_size_table,
+ Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
+ __ jmp(&process_bytecode);
+
+ __ bind(&extra_wide);
+ // Load the next bytecode and update table to the extra wide scaled table.
+ __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
+ __ Add_d(scratch2, bytecode_array, bytecode_offset);
+ __ Ld_bu(bytecode, MemOperand(scratch2, 0));
+ __ Add_d(bytecode_size_table, bytecode_size_table,
+ Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
+
+ __ bind(&process_bytecode);
+
+// Bail out to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ Branch(if_return, eq, bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ Branch(&not_jump_loop, ne, bytecode,
+ Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ Move(bytecode_offset, original_bytecode_offset);
+ __ jmp(&end);
+
+ __ bind(&not_jump_loop);
+ // Otherwise, load the size of the current bytecode and advance the offset.
+ __ Add_d(scratch2, bytecode_size_table, bytecode);
+ __ Ld_b(scratch2, MemOperand(scratch2, 0));
+ __ Add_d(bytecode_offset, bytecode_offset, scratch2);
+
+ __ bind(&end);
+}
+
+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or an optimization marker that needs to be processed.
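+// Informally (a sketch, not actual V8 code):
+//   optimization_state = feedback_vector->flags;
+//   if (optimization_state &
+//       kHasOptimizedCodeOrCompileOptimizedMarkerMask) {
+//     goto has_optimized_code_or_marker;
+//   }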
+static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+ MacroAssembler* masm, Register optimization_state, Register feedback_vector,
+ Label* has_optimized_code_or_marker) {
+ ASM_CODE_COMMENT(masm);
+ Register scratch = t2;
+ // TODO(liuyu): Remove CHECK
+ CHECK_NE(t2, optimization_state);
+ CHECK_NE(t2, feedback_vector);
+ __ Ld_w(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+ __ And(
+ scratch, optimization_state,
+ Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ Branch(has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+}
+
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ MacroAssembler* masm, Register optimization_state,
+ Register feedback_vector) {
+ ASM_CODE_COMMENT(masm);
+ Label maybe_has_optimized_code;
+ // Check if optimized code marker is available
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ And(
+ scratch, optimization_state,
+ Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
+ }
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+ __ bind(&maybe_has_optimized_code);
+ Register optimized_code_entry = optimization_state;
+ __ Ld_d(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry);
+}
+
+// static
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ UseScratchRegisterScope temps(masm);
+ temps.Include(s1.bit() | s2.bit());
+ temps.Exclude(t7.bit());
+ auto descriptor =
+ Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
+ Register closure = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ // Load the feedback vector from the closure.
+ Register feedback_vector = temps.Acquire();
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ GetObjectType(feedback_vector, scratch, scratch);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
+ Operand(FEEDBACK_VECTOR_TYPE));
+ }
+ // Check for an optimization marker.
+ Label has_optimized_code_or_marker;
+ Register optimization_state = no_reg;
+ {
+ UseScratchRegisterScope temps(masm);
+ optimization_state = temps.Acquire();
+ // optimization_state will be used only in |has_optimized_code_or_marker|
+ // and outside it can be reused.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
+ }
+ // Increment invocation count for the function.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register invocation_count = temps.Acquire();
+ __ Ld_w(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ __ Add_w(invocation_count, invocation_count, Operand(1));
+ __ St_w(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ }
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
+ // Normally the first thing we'd do here is Push(ra, fp), but we already
+ // entered the frame in BaselineCompiler::Prologue, as we had to use the
+ // value of ra before the call to this BaselineOutOfLinePrologue builtin.
+ Register callee_context = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kCalleeContext);
+ Register callee_js_function = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ __ Push(callee_context, callee_js_function);
+ DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
+ DCHECK_EQ(callee_js_function, kJSFunctionRegister);
+
+ Register argc = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
+ // We'll use the bytecode array for both resetting the code age/OSR state and
+ // pushing it onto the frame, so load it into a register.
+ Register bytecodeArray = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
+
+ // Reset the code age and OSR arming. The OSR field and BytecodeAgeOffset are
+ // 8-bit fields next to each other, so we can optimize this by writing a
+ // single 16-bit zero. These static asserts guard that this assumption holds.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
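+ // In effect, the 16-bit store below is (a sketch; it relies on the field
+ // adjacency asserted above):
+ //   bytecodeArray->osr_loop_nesting_level = 0;
+ //   bytecodeArray->bytecode_age = kNoAgeBytecodeAge;  // == 0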
+ __ St_h(zero_reg,
+ FieldMemOperand(bytecodeArray,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
+
+ __ Push(argc, bytecodeArray);
+
+ // Baseline code frames store the feedback vector where the interpreter would
+ // store the bytecode offset.
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(masm);
+ Register invocation_count = temps.Acquire();
+ __ GetObjectType(feedback_vector, invocation_count, invocation_count);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
+ Operand(FEEDBACK_VECTOR_TYPE));
+ }
+ // Our stack is currently aligned. We have to push something along with the
+ // feedback vector to keep it that way -- we may as well start initialising
+ // the register frame.
+ // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
+ // `undefined` in the accumulator register, to skip the load in the baseline
+ // code.
+ __ Push(feedback_vector);
+ }
+
+ Label call_stack_guard;
+ Register frame_size = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
+ // Stack check. This folds the checks for both the interrupt stack limit
+ // check and the real stack limit into one by just checking for the
+ // interrupt limit. The interrupt limit is either equal to the real stack
+ // limit or tighter. By ensuring we have space until that limit after
+ // building the frame we can quickly precheck both at once.
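+ // Informally, the check below is (a sketch, not actual V8 code):
+ //   if (sp - frame_size < interrupt_limit) goto call_stack_guard;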
+ UseScratchRegisterScope temps(masm);
+ Register sp_minus_frame_size = temps.Acquire();
+ __ Sub_d(sp_minus_frame_size, sp, frame_size);
+ Register interrupt_limit = temps.Acquire();
+ __ LoadStackLimit(interrupt_limit,
+ MacroAssembler::StackLimitKind::kInterruptStackLimit);
+ __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
+ Operand(interrupt_limit));
+ }
+
+ // Do "fast" return to the caller pc in ra.
+ // TODO(v8:11429): Document this frame setup better.
+ __ Ret();
+
+ __ bind(&has_optimized_code_or_marker);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
+ UseScratchRegisterScope temps(masm);
+ temps.Exclude(optimization_state);
+ // Ensure the optimization_state is not allocated again.
+ // Drop the frame created by the baseline call.
+ __ Pop(ra, fp);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
+ __ Trap();
+ }
+
+ __ bind(&call_stack_guard);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Save incoming new target or generator
+ __ Push(kJavaScriptCallNewTargetRegister);
+ __ SmiTag(frame_size);
+ __ Push(frame_size);
+ __ CallRuntime(Runtime::kStackGuardWithGap);
+ __ Pop(kJavaScriptCallNewTargetRegister);
+ }
+ __ Ret();
+ temps.Exclude(s1.bit() | s2.bit());
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.
+//
+// The live registers are:
+// o a0 : actual argument count (not including the receiver)
+// o a1: the JS function object being called.
+// o a3: the incoming new target or generator object
+// o cp: our context
+// o fp: the caller's frame pointer
+// o sp: stack pointer
+// o ra: return address
+//
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frame-constants.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ Register closure = a1;
+ Register feedback_vector = a2;
+
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
+ __ Ld_d(kScratchReg,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_d(
+ kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
+ Label is_baseline;
+ GetSharedFunctionInfoBytecodeOrBaseline(
+ masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
+
+ // The bytecode array could have been flushed from the shared function info;
+ // if so, call into CompileLazy.
+ Label compile_lazy;
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
+ __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
+
+ // Load the feedback vector from the closure.
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label push_stack_frame;
+ // Check if feedback vector is valid. If valid, check for optimized code
+ // and update invocation count. Otherwise, set up the stack frame.
+ __ Ld_d(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Ld_hu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
+ __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
+
+ // Read off the optimization state in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ Register optimization_state = a4;
+ __ Ld_w(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+
+ // Check if the optimized code slot is not empty or has an optimization
+ // marker.
+ Label has_optimized_code_or_marker;
+
+ __ andi(t0, optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
+ __ Branch(&has_optimized_code_or_marker, ne, t0, Operand(zero_reg));
+
+ Label not_optimized;
+ __ bind(&not_optimized);
+
+ // Increment invocation count for the function.
+ __ Ld_w(a4, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ __ Add_w(a4, a4, Operand(1));
+ __ St_w(a4, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ __ bind(&push_stack_frame);
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushStandardFrame(closure);
+
+ // Reset the code age and OSR arming. The OSR field and BytecodeAgeOffset are
+ // 8-bit fields next to each other, so we can optimize this by writing a
+ // single 16-bit zero. These static asserts guard that this assumption holds.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ St_h(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
+
+ // Load initial bytecode offset.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ // Push bytecode array and Smi tagged bytecode array offset.
+ __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
+ __ Push(kInterpreterBytecodeArrayRegister, a4);
+
+ // Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
+ {
+ // Load frame size (word) from the BytecodeArray object.
+ __ Ld_w(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ __ Sub_d(a5, sp, Operand(a4));
+ __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&stack_overflow, lo, a5, Operand(a2));
+
+ // If ok, push undefined as the initial value for all register file entries.
+ Label loop_header;
+ Label loop_check;
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ Push(kInterpreterAccumulatorRegister);
+ // Continue loop if not done.
+ __ bind(&loop_check);
+ __ Sub_d(a4, a4, Operand(kPointerSize));
+ __ Branch(&loop_header, ge, a4, Operand(zero_reg));
+ }
+
+ // If the bytecode array has a valid incoming new target or generator object
+ // register, initialize it with the incoming value, which was passed in a3.
+ Label no_incoming_new_target_or_generator_register;
+ __ Ld_w(a5, FieldMemOperand(
+ kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
+ __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
+ Operand(zero_reg));
+ __ Alsl_d(a5, a5, fp, kPointerSizeLog2, t7);
+ __ St_d(a3, MemOperand(a5, 0));
+ __ bind(&no_incoming_new_target_or_generator_register);
+
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
+ __ Branch(&stack_check_interrupt, lo, sp, Operand(a5));
+ __ bind(&after_stack_check_interrupt);
+
+ // The accumulator is already loaded with undefined.
+
+ // Load the dispatch table into a register and dispatch to the bytecode
+ // handler at the current bytecode offset.
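+ // Informally, the dispatch below is (a sketch, not actual V8 code):
+ //   handler = dispatch_table[bytecode_array[bytecode_offset]];
+ //   call(handler);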
+ Label do_dispatch;
+ __ bind(&do_dispatch);
+ __ li(kInterpreterDispatchTableRegister,
+ ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
+ __ Add_d(t5, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Ld_bu(a7, MemOperand(t5, 0));
+ __ Alsl_d(kScratchReg, a7, kInterpreterDispatchTableRegister,
+ kPointerSizeLog2, t7);
+ __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg, 0));
+ __ Call(kJavaScriptCallCodeStartRegister);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+
+ // Any returns to the entry trampoline are either due to the return bytecode
+ // or the interpreter tail calling a builtin and then a dispatch.
+
+ // Get bytecode array and bytecode offset from the stack frame.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Ld_d(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Either return, or advance to the next bytecode and dispatch.
+ Label do_return;
+ __ Add_d(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Ld_bu(a1, MemOperand(a1, 0));
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ a4, &do_return);
+ __ jmp(&do_dispatch);
+
+ __ bind(&do_return);
+ // The return value is in a0.
+ LeaveInterpreterFrame(masm, t0, t1);
+ __ Jump(ra);
+
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ St_d(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(a5, kInterpreterBytecodeOffsetRegister);
+ __ St_d(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
+ __ bind(&has_optimized_code_or_marker);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
+
+ __ bind(&is_baseline);
+ {
+ // Load the feedback vector from the closure.
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ Ld_d(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Ld_hu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
+
+ // Check for an optimization marker.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
+
+ // Load the baseline code into the closure.
+ __ Move(a2, kInterpreterBytecodeArrayRegister);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ ReplaceClosureCodeWithOptimizedCode(masm, a2, closure);
+ __ JumpCodeObject(a2);
+
+ __ bind(&install_baseline_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
+ }
+
+ __ bind(&compile_lazy);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+ // Unreachable code.
+ __ break_(0xCC);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+}
+
+static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
+ Register start_address,
+ Register scratch, Register scratch2) {
+ // Find the address of the last argument.
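+ // Informally: start_address -= (num_args - 1) * kPointerSize, so it then
+ // points at the last (lowest-addressed) argument.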
+ __ Sub_d(scratch, num_args, Operand(1));
+ __ slli_d(scratch, scratch, kPointerSizeLog2);
+ __ Sub_d(start_address, start_address, scratch);
+
+ // Push the arguments.
+ __ PushArray(start_address, num_args, scratch, scratch2,
+ TurboAssembler::PushArrayOrder::kReverse);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsThenCallImpl(
+ MacroAssembler* masm, ConvertReceiverMode receiver_mode,
+ InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+ Label stack_overflow;
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ Sub_d(a0, a0, Operand(1));
+ }
+
+ __ Add_d(a3, a0, Operand(1)); // Add one for receiver.
+
+ __ StackOverflowCheck(a3, a4, t0, &stack_overflow);
+
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Don't copy receiver.
+ __ mov(a3, a0);
+ }
+
+ // This function modifies a2, t0 and a4.
+ GenerateInterpreterPushArgs(masm, a3, a2, a4, t0);
+
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ __ PushRoot(RootIndex::kUndefinedValue);
+ }
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Pass the spread in the register a2.
+ // a2 already points to the penultimate argument, the spread
+ // is below that.
+ __ Ld_d(a2, MemOperand(a2, -kSystemPointerSize));
+ }
+
+ // Call the target.
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
+ RelocInfo::CODE_TARGET);
+ } else {
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+ }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
+ MacroAssembler* masm, InterpreterPushArgsMode mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (not including receiver)
+ // -- a3 : new target
+ // -- a1 : constructor to call
+ // -- a2 : allocation site feedback if available, undefined otherwise.
+ // -- a4 : address of the first argument
+ // -----------------------------------
+ Label stack_overflow;
+ __ addi_d(a6, a0, 1);
+ __ StackOverflowCheck(a6, a5, t0, &stack_overflow);
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ Sub_d(a0, a0, Operand(1));
+ }
+
+ // Push the arguments. This function modifies t0, a4 and a5.
+ GenerateInterpreterPushArgs(masm, a0, a4, a5, t0);
+
+ // Push a slot for the receiver.
+ __ Push(zero_reg);
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Pass the spread in the register a2.
+ // a4 already points to the penultimate argument, the spread
+ // lies in the next interpreter register.
+ __ Ld_d(a2, MemOperand(a4, -kSystemPointerSize));
+ } else {
+ __ AssertUndefinedOrAllocationSite(a2, t0);
+ }
+
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
+ __ AssertFunction(a1);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
+ RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+ }
+}
+
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Label builtin_trampoline, trampoline_loaded;
+ Smi interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
+
+ // If the SFI function_data is an InterpreterData, the function will have a
+ // custom copy of the interpreter entry trampoline for profiling. If so,
+ // get the custom trampoline, otherwise grab the entry address of the global
+ // trampoline.
+ __ Ld_d(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ Ld_d(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_d(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(t0, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister);
+ __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
+ Operand(INTERPRETER_DATA_TYPE));
+
+ __ Ld_d(t0,
+ FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
+ __ Add_d(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Branch(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
+ __ li(t0, ExternalReference::
+ address_of_interpreter_entry_trampoline_instruction_start(
+ masm->isolate()));
+ __ Ld_d(t0, MemOperand(t0, 0));
+
+ __ bind(&trampoline_loaded);
+ __ Add_d(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
+
+ // Initialize the dispatch table register.
+ __ li(kInterpreterDispatchTableRegister,
+ ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
+
+ // Get the bytecode array pointer from the frame.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ kScratchReg, Operand(zero_reg));
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
+ __ Assert(eq,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ a1, Operand(BYTECODE_ARRAY_TYPE));
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ if (FLAG_debug_code) {
+ Label okay;
+ __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ // Unreachable code.
+ __ break_(0xCC);
+ __ bind(&okay);
+ }
+
+ // Dispatch to the target bytecode.
+ __ Add_d(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Ld_bu(a7, MemOperand(a1, 0));
+ __ Alsl_d(a1, a7, kInterpreterDispatchTableRegister, kPointerSizeLog2, t7);
+ __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(a1, 0));
+ __ Jump(kJavaScriptCallCodeStartRegister);
+}
+
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
+ // Advance the current bytecode offset stored within the given interpreter
+ // stack frame. This simulates what all bytecode handlers do upon completion
+ // of the underlying operation.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Ld_d(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ Label enter_bytecode, function_entry_bytecode;
+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+
+ // Load the current bytecode.
+ __ Add_d(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Ld_bu(a1, MemOperand(a1, 0));
+
+ // Advance to the next bytecode.
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ a4, &if_return);
+
+ __ bind(&enter_bytecode);
+ // Convert new bytecode offset to a Smi and save in the stackframe.
+ __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
+ __ St_d(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ Generate_InterpreterEnterBytecode(masm);
+
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ Branch(&enter_bytecode);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
+}
+
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
+ Generate_InterpreterEnterBytecode(masm);
+}
+
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Default());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ if (with_result) {
+ if (java_script_builtin) {
+ __ mov(scratch, a0);
+ } else {
+ // Overwrite the hole inserted by the deoptimizer with the return value
+ // from the LAZY deopt point.
+ __ St_d(
+ a0,
+ MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ }
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
+
+ if (with_result && java_script_builtin) {
+ // Overwrite the hole inserted by the deoptimizer with the return value
+ // from the LAZY deopt point. a0 contains the arguments count; the return
+ // value from LAZY is always the last argument.
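+ // Informal sketch of the net effect below (not actual V8 code; a0 holds
+ // the argument count):
+ //   sp[(a0 + kFixedSlotCount) * kSystemPointerSize] = scratch;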
+ __ Add_d(a0, a0,
+ Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ __ Alsl_d(t0, a0, sp, kSystemPointerSizeLog2, t7);
+ __ St_d(scratch, MemOperand(t0, 0));
+ // Recover arguments count.
+ __ Sub_d(a0, a0,
+ Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ }
+
+ __ Ld_d(
+ fp,
+ MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
+ __ Pop(t0);
+ __ Add_d(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ra);
+ __ LoadEntryFromBuiltinIndex(t0);
+ __ Jump(t0);
+}
+} // namespace
+
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ }
+
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), a0.code());
+ __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize));
+ __ Add_d(sp, sp, Operand(1 * kPointerSize)); // Remove state.
+ __ Ret();
+}
+
+namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+ Operand offset = Operand(zero_reg)) {
+ __ Add_d(ra, entry_address, offset);
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ }
+
+ // If the code object is null, just return to the caller.
+ __ Ret(eq, a0, Operand(Smi::zero()));
+
+ if (is_interpreter) {
+ // Drop the handler frame that is sitting on top of the actual
+ // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ __ LeaveFrame(StackFrame::STUB);
+ }
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ Ld_d(a1, MemOperand(a0, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ Add_d(a0, a0, a1);
+ Generate_OSREntry(masm, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+} // namespace
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, true);
+}
+
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ Ld_d(kContextRegister,
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+ return OnStackReplacement(masm, false);
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : receiver
+ // -- sp[4] : thisArg
+ // -- sp[8] : argArray
+ // -----------------------------------
+
+ Register argc = a0;
+ Register arg_array = a2;
+ Register receiver = a1;
+ Register this_arg = a5;
+ Register undefined_value = a3;
+ Register scratch = a4;
+
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+
+ // 1. Load receiver into a1, argArray into a2 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
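+ // Informally, the Movz-based selection below amounts to (a sketch):
+ //   this_arg  = (argc >= 1) ? sp[1 * kPointerSize] : undefined;
+ //   arg_array = (argc >= 2) ? sp[2 * kPointerSize] : undefined;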
+
+ __ mov(scratch, argc);
+ __ Ld_d(this_arg, MemOperand(sp, kPointerSize));
+ __ Ld_d(arg_array, MemOperand(sp, 2 * kPointerSize));
+ __ Movz(arg_array, undefined_value, scratch); // if argc == 0
+ __ Movz(this_arg, undefined_value, scratch); // if argc == 0
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(arg_array, undefined_value, scratch); // if argc == 1
+ __ Ld_d(receiver, MemOperand(sp, 0));
+ __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7);
+ __ St_d(this_arg, MemOperand(sp, 0));
+ }
+
+ // ----------- S t a t e -------------
+ // -- a2 : argArray
+ // -- a1 : receiver
+ // -- a3 : undefined root value
+ // -- sp[0] : thisArg
+ // -----------------------------------
+
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
+ __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
+
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
+ RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ mov(a0, zero_reg);
+ DCHECK(receiver == a1);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+ // 1. Get the callable to call (passed as receiver) from the stack.
+ { __ Pop(a1); }
+
+ // 2. Make sure we have at least one argument.
+ // a0: actual number of arguments
+ {
+ Label done;
+ __ Branch(&done, ne, a0, Operand(zero_reg));
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ Add_d(a0, a0, Operand(1));
+ __ bind(&done);
+ }
+
+ // 3. Adjust the actual number of arguments.
+ __ addi_d(a0, a0, -1);
+
+ // 4. Call the callable.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : receiver
+ // -- sp[8] : target (if argc >= 1)
+ // -- sp[16] : thisArgument (if argc >= 2)
+ // -- sp[24] : argumentsList (if argc == 3)
+ // -----------------------------------
+
+ Register argc = a0;
+ Register arguments_list = a2;
+ Register target = a1;
+ Register this_argument = a5;
+ Register undefined_value = a3;
+ Register scratch = a4;
+
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+
+ // 1. Load target into a1 (if present), argumentsList into a2 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
+ {
+ // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+
+ __ mov(scratch, argc);
+ __ Ld_d(target, MemOperand(sp, kPointerSize));
+ __ Ld_d(this_argument, MemOperand(sp, 2 * kPointerSize));
+ __ Ld_d(arguments_list, MemOperand(sp, 3 * kPointerSize));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
+ __ Movz(this_argument, undefined_value, scratch); // if argc == 0
+ __ Movz(target, undefined_value, scratch); // if argc == 0
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
+ __ Movz(this_argument, undefined_value, scratch); // if argc == 1
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 2
+
+ __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7);
+ __ St_d(this_argument, MemOperand(sp, 0)); // Overwrite receiver
+ }
+
+ // ----------- S t a t e -------------
+ // -- a2 : argumentsList
+ // -- a1 : target
+ // -- a3 : undefined root value
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
+
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
+ RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : receiver
+ // -- sp[8] : target
+ // -- sp[16] : argumentsList
+ // -- sp[24] : new.target (optional)
+ // -----------------------------------
+
+ Register argc = a0;
+ Register arguments_list = a2;
+ Register target = a1;
+ Register new_target = a3;
+ Register undefined_value = a4;
+ Register scratch = a5;
+
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+
+ // 1. Load target into a1 (if present), argumentsList into a2 (if present),
+ // new.target into a3 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and leave undefined
+ // as the receiver on the stack.
+ {
+ // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+
+ __ mov(scratch, argc);
+ __ Ld_d(target, MemOperand(sp, kPointerSize));
+ __ Ld_d(arguments_list, MemOperand(sp, 2 * kPointerSize));
+ __ Ld_d(new_target, MemOperand(sp, 3 * kPointerSize));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
+ __ Movz(new_target, undefined_value, scratch); // if argc == 0
+ __ Movz(target, undefined_value, scratch); // if argc == 0
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
+ __ Movz(new_target, target, scratch); // if argc == 1
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(new_target, target, scratch); // if argc == 2
+
+ __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7);
+ __ St_d(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
+ }
+
+ // ----------- S t a t e -------------
+ // -- a2 : argumentsList
+ // -- a1 : target
+ // -- a3 : new.target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------
+
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
+
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
+
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
+ RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- a1 : target
+ // -- a0 : number of parameters on the stack (not including the receiver)
+ // -- a2 : arguments list (a FixedArray)
+ // -- a4 : len (number of elements to push from args)
+ // -- a3 : new.target (for [[Construct]])
+ // -----------------------------------
+ if (FLAG_debug_code) {
+ // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
+ Label ok, fail;
+ __ AssertNotSmi(a2);
+ __ GetObjectType(a2, t8, t8);
+ __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
+ __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
+ __ Branch(&ok, eq, a4, Operand(zero_reg));
+ // Fall through.
+ __ bind(&fail);
+ __ Abort(AbortReason::kOperandIsNotAFixedArray);
+
+ __ bind(&ok);
+ }
+
+ Register args = a2;
+ Register len = a4;
+
+ // Check for stack overflow.
+ Label stack_overflow;
+ __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);
+
+ // Move the arguments already on the stack,
+ // including the receiver.
+ {
+ Label copy;
+ Register src = a6, dest = a7;
+ __ mov(src, sp);
+ __ slli_d(t0, a4, kSystemPointerSizeLog2);
+ __ Sub_d(sp, sp, Operand(t0));
+ // Update stack pointer.
+ __ mov(dest, sp);
+ __ Add_d(t0, a0, Operand(zero_reg));
+
+ __ bind(&copy);
+ __ Ld_d(t1, MemOperand(src, 0));
+ __ St_d(t1, MemOperand(dest, 0));
+ __ Sub_d(t0, t0, Operand(1));
+ __ Add_d(src, src, Operand(kSystemPointerSize));
+ __ Add_d(dest, dest, Operand(kSystemPointerSize));
+ __ Branch(&copy, ge, t0, Operand(zero_reg));
+ }
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ Label done, push, loop;
+ Register src = a6;
+ Register scratch = len;
+
+ __ addi_d(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add_d(a0, a0, len); // The 'len' argument for Call() or Construct().
+ __ Branch(&done, eq, len, Operand(zero_reg));
+ __ slli_d(scratch, len, kPointerSizeLog2);
+ __ Sub_d(scratch, sp, Operand(scratch));
+ __ LoadRoot(t1, RootIndex::kTheHoleValue);
+ __ bind(&loop);
+ __ Ld_d(a5, MemOperand(src, 0));
+ __ addi_d(src, src, kPointerSize);
+ __ Branch(&push, ne, a5, Operand(t1));
+ __ LoadRoot(a5, RootIndex::kUndefinedValue);
+ __ bind(&push);
+ __ St_d(a5, MemOperand(a7, 0));
+ __ Add_d(a7, a7, Operand(kSystemPointerSize));
+ __ Add_d(scratch, scratch, Operand(kSystemPointerSize));
+ __ Branch(&loop, ne, scratch, Operand(sp));
+ __ bind(&done);
+ }
+
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
+
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+}
+
+// static
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ CallOrConstructMode mode,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a3 : the new.target (for [[Construct]] calls)
+ // -- a1 : the target to call (can be any Object)
+ // -- a2 : start index (to support rest parameters)
+ // -----------------------------------
+
+ // Check if new.target has a [[Construct]] internal method.
+ if (mode == CallOrConstructMode::kConstruct) {
+ Label new_target_constructor, new_target_not_constructor;
+ __ JumpIfSmi(a3, &new_target_not_constructor);
+ __ Ld_d(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
+ __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
+ __ bind(&new_target_not_constructor);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ Push(a3);
+ __ CallRuntime(Runtime::kThrowNotConstructor);
+ }
+ __ bind(&new_target_constructor);
+ }
+
+ Label stack_done, stack_overflow;
+ __ Ld_d(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ Sub_w(a7, a7, a2);
+ __ Branch(&stack_done, le, a7, Operand(zero_reg));
+ {
+ // Check for stack overflow.
+ __ StackOverflowCheck(a7, a4, a5, &stack_overflow);
+
+ // Forward the arguments from the caller frame.
+
+ // Point to the first argument to copy (skipping the receiver).
+ __ Add_d(a6, fp,
+ Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
+ kSystemPointerSize));
+ __ Alsl_d(a6, a2, a6, kSystemPointerSizeLog2, t7);
+
+ // Move the arguments already on the stack,
+ // including the receiver.
+ {
+ Label copy;
+ Register src = t0, dest = a2;
+ __ mov(src, sp);
+ // Update stack pointer.
+ __ slli_d(t1, a7, kSystemPointerSizeLog2);
+ __ Sub_d(sp, sp, Operand(t1));
+ __ mov(dest, sp);
+ __ Add_d(t2, a0, Operand(zero_reg));
+
+ __ bind(&copy);
+ __ Ld_d(t1, MemOperand(src, 0));
+ __ St_d(t1, MemOperand(dest, 0));
+ __ Sub_d(t2, t2, Operand(1));
+ __ Add_d(src, src, Operand(kSystemPointerSize));
+ __ Add_d(dest, dest, Operand(kSystemPointerSize));
+ __ Branch(&copy, ge, t2, Operand(zero_reg));
+ }
+
+ // Copy arguments from the caller frame.
+ // TODO(victorgomes): Consider using forward order as potentially more cache
+ // friendly.
+ {
+ Label loop;
+ __ Add_d(a0, a0, a7);
+ __ bind(&loop);
+ {
+ __ Sub_w(a7, a7, Operand(1));
+ __ Alsl_d(t0, a7, a6, kPointerSizeLog2, t7);
+ __ Ld_d(kScratchReg, MemOperand(t0, 0));
+ __ Alsl_d(t0, a7, a2, kPointerSizeLog2, t7);
+ __ St_d(kScratchReg, MemOperand(t0, 0));
+ __ Branch(&loop, ne, a7, Operand(zero_reg));
+ }
+ }
+ }
+ __ Branch(&stack_done);
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&stack_done);
+
+ // Tail-call to the {code} handler.
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that function is not a "classConstructor".
+ Label class_constructor;
+ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
+ __ And(kScratchReg, a3,
+ Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
+ __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
+
+ // Enter the context of the function; ToObject has to run in the function
+ // context, and we also need to take the global proxy from the function
+ // context in case of conversion.
+ __ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
+ __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
+ __ And(kScratchReg, a3,
+ Operand(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
+ __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
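+ // Only sloppy-mode, non-native functions fall through to the conversion
+ // code below; native and strict-mode functions skip it.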
+ {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -- a2 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(a3);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ LoadReceiver(a3, a0);
+ __ JumpIfSmi(a3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ GetObjectType(a3, a4, a4);
+ __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
+ __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(a3);
+ }
+ __ Branch(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ Push(a0, a1);
+ __ mov(a0, a3);
+ __ Push(cp);
+ __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
+ RelocInfo::CODE_TARGET);
+ __ Pop(cp);
+ __ mov(a3, a0);
+ __ Pop(a0, a1);
+ __ SmiUntag(a0);
+ }
+ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
+ }
+ __ StoreReceiver(a3, a0, kScratchReg);
+ }
+ __ bind(&done_convert);
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -- a2 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ __ Ld_hu(
+ a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
+
+ // The function is a "classConstructor", need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(a1);
+
+ // Patch the receiver to [[BoundThis]].
+ {
+ __ Ld_d(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+ __ StoreReceiver(t0, a0, kScratchReg);
+ }
+
+ // Load [[BoundArguments]] into a2 and length of that into a4.
+ __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- a4 : the number of [[BoundArguments]]
+ // -----------------------------------
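+
+ // Informal sketch of the stack transformation performed below (stack top
+ // first; the receiver has already been patched to [[BoundThis]] above):
+ //   [receiver, arg1, ..., argN]
+ //     -> [receiver, bound1, ..., boundM, arg1, ..., argN]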
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ slli_d(a5, a4, kPointerSizeLog2);
+ __ Sub_d(t0, sp, Operand(a5));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&done, hs, t0, Operand(kScratchReg));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Pop receiver.
+ __ Pop(t0);
+
+ // Push [[BoundArguments]].
+ {
+ Label loop, done_loop;
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Add_d(a0, a0, Operand(a4));
+ __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ Sub_d(a4, a4, Operand(1));
+ __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+ __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7);
+ __ Ld_d(kScratchReg, MemOperand(a5, 0));
+ __ Push(kScratchReg);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Push receiver.
+ __ Push(t0);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
+ RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
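+
+ // Informal sketch of the dispatch below (not actual V8 code):
+ //   if (target is a Smi)                   throw CalledNonCallable;
+ //   if (target is in the JSFunction range) tail-call CallFunction(mode);
+ //   if (target is a JSBoundFunction)       tail-call CallBoundFunction;
+ //   if (!target->map()->is_callable())     throw CalledNonCallable;
+ //   if (target is a JSProxy)               tail-call CallProxy;
+ //   otherwise patch the receiver to target and tail-call the
+ //   CALL_AS_FUNCTION_DELEGATE via CallFunction.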
+
+ Label non_callable, non_smi;
+ __ JumpIfSmi(a1, &non_callable);
+ __ bind(&non_smi);
+ __ LoadMap(t1, a1);
+ __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8);
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET, ls, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Check if target has a [[Call]] internal method.
+ __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t1, t1, Operand(Map::Bits1::IsCallableBit::kMask));
+ __ Branch(&non_callable, eq, t1, Operand(zero_reg));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
+ t2, Operand(JS_PROXY_TYPE));
+
+ // 2. Call to something else, which might have a [[Call]] internal method (if
+ // not we raise an exception).
+ // Overwrite the original receiver with the (original) target.
+ __ StoreReceiver(a1, a0, kScratchReg);
+ // Let the "call_as_function_delegate" take care of the rest.
+ __ LoadNativeContextSlot(a1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
+
+ // 3. Call to something that is not callable.
+ __ bind(&non_callable);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
+ }
+}
+
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (checked to be a JSFunction)
+ // -- a3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertConstructor(a1);
+ __ AssertFunction(a1);
+
+ // Calling convention for function-specific ConstructStubs requires
+ // a2 to contain either an AllocationSite or undefined.
+ __ LoadRoot(a2, RootIndex::kUndefinedValue);
+
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
+ __ Ld_d(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_wu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
+ __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
+ RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertConstructor(a1);
+ __ AssertBoundFunction(a1);
+
+ // Load [[BoundArguments]] into a2 and length of that into a4.
+ __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- a3 : the new target (checked to be a constructor)
+ // -- a4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ slli_d(a5, a4, kPointerSizeLog2);
+ __ Sub_d(t0, sp, Operand(a5));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&done, hs, t0, Operand(kScratchReg));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
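+  // In effect, the block above performs:
+  //   if (sp - num_bound_args * kPointerSize < real_stack_limit)
+  //     ThrowStackOverflow();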
+
+ // Pop receiver.
+ __ Pop(t0);
+
+ // Push [[BoundArguments]].
+ {
+ Label loop, done_loop;
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Add_d(a0, a0, Operand(a4));
+ __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ Sub_d(a4, a4, Operand(1));
+ __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+ __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7);
+ __ Ld_d(kScratchReg, MemOperand(a5, 0));
+ __ Push(kScratchReg);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
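+  // Roughly equivalent pseudocode for the loop above:
+  //   for (int i = num_bound_args - 1; i >= 0; --i)
+  //     Push(bound_arguments[i]);
+  //   argc += num_bound_args;  // done via the Add_d on a0 above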
+
+ // Push receiver.
+ __ Push(t0);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label skip_load;
+ __ Branch(&skip_load, ne, a1, Operand(a3));
+ __ Ld_d(a3,
+ FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&skip_load);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (can be any Object)
+ // -- a3 : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // Check if target is a Smi.
+ Label non_constructor, non_proxy;
+ __ JumpIfSmi(a1, &non_constructor);
+
+ // Check if target has a [[Construct]] internal method.
+ __ Ld_d(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Ld_bu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t3, t3, Operand(Map::Bits1::IsConstructorBit::kMask));
+ __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
+
+ // Dispatch based on instance type.
+ __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, ls, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE));
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
+ RelocInfo::CODE_TARGET);
+
+ // Called Construct on an exotic Object with a [[Construct]] internal method.
+ __ bind(&non_proxy);
+ {
+ // Overwrite the original receiver with the (original) target.
+ __ StoreReceiver(a1, a0, kScratchReg);
+ // Let the "call_as_constructor_delegate" take care of the rest.
+ __ LoadNativeContextSlot(a1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ Jump(masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ // Called Construct on an Object that doesn't have a [[Construct]] internal
+ // method.
+ __ bind(&non_constructor);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
+ RelocInfo::CODE_TARGET);
+}
+
+#if V8_ENABLE_WEBASSEMBLY
+void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in t0 by the jump table trampoline.
+  // Convert it to a Smi for the runtime call.
+ __ SmiTag(kWasmCompileLazyFuncIndexRegister);
+ {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
+
+ // Save all parameter registers (see wasm-linkage.h). They might be
+ // overwritten in the runtime call below. We don't have any callee-saved
+ // registers in wasm, so no need to store anything else.
+ RegList gp_regs = 0;
+ for (Register gp_param_reg : wasm::kGpParamRegisters) {
+ gp_regs |= gp_param_reg.bit();
+ }
+
+ RegList fp_regs = 0;
+ for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
+ fp_regs |= fp_param_reg.bit();
+ }
+
+ CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
+ CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
+ NumRegs(gp_regs));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
+ NumRegs(fp_regs));
+
+ __ MultiPush(gp_regs);
+ __ MultiPushFPU(fp_regs);
+
+ // kFixedFrameSizeFromFp is hard coded to include space for Simd
+ // registers, so we still need to allocate extra (unused) space on the stack
+ // as if they were saved.
+ __ Sub_d(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
+
+    // Pass the instance and the function index as explicit arguments to the
+    // runtime function.
+ __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(kContextRegister, Smi::zero());
+ __ CallRuntime(Runtime::kWasmCompileLazy, 2);
+ __ mov(t8, a0);
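+    // The runtime call returned the entrypoint of the lazily compiled code in
+    // a0; keep it in t8 so it survives the register restore below.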
+
+ __ Add_d(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
+ // Restore registers.
+ __ MultiPopFPU(fp_regs);
+ __ MultiPop(gp_regs);
+ }
+ // Finally, jump to the entrypoint.
+ __ Jump(t8);
+}
+
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values, we restore
+ // them after the runtime call.
+ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
+void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
+ __ Trap();
+}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
+
+#endif // V8_ENABLE_WEBASSEMBLY
+
+void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
+ SaveFPRegsMode save_doubles, ArgvMode argv_mode,
+ bool builtin_exit_frame) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // a0: number of arguments including receiver
+ // a1: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+ //
+ // If argv_mode == ArgvMode::kRegister:
+ // a2: pointer to the first argument
+
+ if (argv_mode == ArgvMode::kRegister) {
+ // Move argv into the correct register.
+ __ mov(s1, a2);
+ } else {
+ // Compute the argv pointer in a callee-saved register.
+ __ Alsl_d(s1, a0, sp, kPointerSizeLog2, t7);
+ __ Sub_d(s1, s1, kPointerSize);
+ }
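+  // In the stack-argument case this computes, in effect,
+  //   s1 = sp + (argc - 1) * kPointerSize,
+  // i.e. the highest-addressed of the argc argument slots; it is handed to
+  // the C function as argv below.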
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(
+ save_doubles == SaveFPRegsMode::kSave, 0,
+ builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
+
+ // s0: number of arguments including receiver (C callee-saved)
+ // s1: pointer to first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
+
+ // Prepare arguments for C routine.
+ // a0 = argc
+ __ mov(s0, a0);
+ __ mov(s2, a1);
+
+  // We are about to call compiled C/C++ code. a0, a1 and a2 will hold the
+  // three arguments (argc, argv and the isolate address).
+
+ __ AssertStackIsAligned();
+
+ // a0 = argc, a1 = argv, a2 = isolate
+ __ li(a2, ExternalReference::isolate_address(masm->isolate()));
+ __ mov(a1, s1);
+
+ __ StoreReturnAddressAndCall(s2);
+
+ // Result returned in a0 or a1:a0 - do not destroy these registers!
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ LoadRoot(a4, RootIndex::kException);
+ __ Branch(&exception_returned, eq, a4, Operand(a0));
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ ExternalReference pending_exception_address = ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate());
+ __ li(a2, pending_exception_address);
+ __ Ld_d(a2, MemOperand(a2, 0));
+ __ LoadRoot(a4, RootIndex::kTheHoleValue);
+    // Cannot use Check here, as it attempts to generate a call into the runtime.
+ __ Branch(&okay, eq, a4, Operand(a2));
+ __ stop();
+ __ bind(&okay);
+ }
+
+ // Exit C frame and return.
+ // a0:a1: result
+ // sp: stack pointer
+ // fp: frame pointer
+ Register argc = argv_mode == ArgvMode::kRegister
+ // We don't want to pop arguments so set argc to no_reg.
+ ? no_reg
+ // s0: still holds argc (callee-saved).
+ : s0;
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
+
+ // Handling of exception.
+ __ bind(&exception_returned);
+
+ ExternalReference pending_handler_context_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
+ ExternalReference pending_handler_entrypoint_address =
+ ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
+ ExternalReference pending_handler_fp_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
+ ExternalReference pending_handler_sp_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
+
+  // Ask the runtime for help to determine the handler. This will set a0 to
+  // the current pending exception; do not clobber it.
+ ExternalReference find_handler =
+ ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(3, 0, a0);
+ __ mov(a0, zero_reg);
+ __ mov(a1, zero_reg);
+ __ li(a2, ExternalReference::isolate_address(masm->isolate()));
+ __ CallCFunction(find_handler, 3);
+ }
+
+ // Retrieve the handler context, SP and FP.
+ __ li(cp, pending_handler_context_address);
+ __ Ld_d(cp, MemOperand(cp, 0));
+ __ li(sp, pending_handler_sp_address);
+ __ Ld_d(sp, MemOperand(sp, 0));
+ __ li(fp, pending_handler_fp_address);
+ __ Ld_d(fp, MemOperand(fp, 0));
+
+  // If the handler is a JS frame, restore the context to the frame. Note
+  // that cp will be 0 for non-JS frames.
+ Label zero;
+ __ Branch(&zero, eq, cp, Operand(zero_reg));
+ __ St_d(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&zero);
+
+ // Clear c_entry_fp, like we do in `LeaveExitFrame`.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ li(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ St_d(zero_reg, MemOperand(scratch, 0));
+ }
+
+ // Compute the handler entry address and jump to it.
+ __ li(t7, pending_handler_entrypoint_address);
+ __ Ld_d(t7, MemOperand(t7, 0));
+ __ Jump(t7);
+}
+
+void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
+ Label done;
+ Register result_reg = t0;
+
+ Register scratch = GetRegisterThatIsNotOneOf(result_reg);
+ Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
+ Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
+ DoubleRegister double_scratch = kScratchDoubleReg;
+
+  // Account for the four registers saved below.
+ const int kArgumentOffset = 4 * kPointerSize;
+
+ __ Push(result_reg);
+ __ Push(scratch, scratch2, scratch3);
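+  // With result_reg, scratch, scratch2 and scratch3 pushed, the caller's
+  // double argument now lives at sp + kArgumentOffset.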
+
+ // Load double input.
+ __ Fld_d(double_scratch, MemOperand(sp, kArgumentOffset));
+
+ // Try a conversion to a signed integer.
+ __ ftintrz_w_d(double_scratch, double_scratch);
+ // Move the converted value into the result register.
+ __ movfr2gr_s(scratch3, double_scratch);
+
+ // Retrieve and restore the FCSR.
+ __ movfcsr2gr(scratch);
+
+ // Check for overflow and NaNs.
+ __ And(scratch, scratch,
+ kFCSRExceptionCauseMask ^ kFCSRDivideByZeroCauseMask);
+ // If we had no exceptions then set result_reg and we are done.
+ Label error;
+ __ Branch(&error, ne, scratch, Operand(zero_reg));
+ __ Move(result_reg, scratch3);
+ __ Branch(&done);
+ __ bind(&error);
+
+ // Load the double value and perform a manual truncation.
+ Register input_high = scratch2;
+ Register input_low = scratch3;
+
+ __ Ld_w(input_low,
+ MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
+ __ Ld_w(input_high,
+ MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
+
+ Label normal_exponent;
+ // Extract the biased exponent in result.
+ __ bstrpick_w(result_reg, input_high,
+ HeapNumber::kExponentShift + HeapNumber::kExponentBits - 1,
+ HeapNumber::kExponentShift);
+
+ // Check for Infinity and NaNs, which should return 0.
+ __ Sub_w(scratch, result_reg, HeapNumber::kExponentMask);
+ __ Movz(result_reg, zero_reg, scratch);
+ __ Branch(&done, eq, scratch, Operand(zero_reg));
+
+ // Express exponent as delta to (number of mantissa bits + 31).
+ __ Sub_w(result_reg, result_reg,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
+
+ // If the delta is strictly positive, all bits would be shifted away,
+ // which means that we can return 0.
+ __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
+ __ mov(result_reg, zero_reg);
+ __ Branch(&done);
+
+ __ bind(&normal_exponent);
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ // Calculate shift.
+ __ Add_w(scratch, result_reg,
+ Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+ // Save the sign.
+ Register sign = result_reg;
+ result_reg = no_reg;
+ __ And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+ // On ARM shifts > 31 bits are valid and will result in zero. On LOONG64 we
+ // need to check for this specific case.
+ Label high_shift_needed, high_shift_done;
+ __ Branch(&high_shift_needed, lt, scratch, Operand(32));
+ __ mov(input_high, zero_reg);
+ __ Branch(&high_shift_done);
+ __ bind(&high_shift_needed);
+
+ // Set the implicit 1 before the mantissa part in input_high.
+ __ Or(input_high, input_high,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ // Shift the mantissa bits to the correct position.
+ // We don't need to clear non-mantissa bits as they will be shifted away.
+  // If they weren't, it would mean that the answer is in the 32-bit range.
+ __ sll_w(input_high, input_high, scratch);
+
+ __ bind(&high_shift_done);
+
+ // Replace the shifted bits with bits from the lower mantissa word.
+ Label pos_shift, shift_done;
+ __ li(kScratchReg, 32);
+ __ sub_w(scratch, kScratchReg, scratch);
+ __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+
+ // Negate scratch.
+ __ Sub_w(scratch, zero_reg, scratch);
+ __ sll_w(input_low, input_low, scratch);
+ __ Branch(&shift_done);
+
+ __ bind(&pos_shift);
+ __ srl_w(input_low, input_low, scratch);
+
+ __ bind(&shift_done);
+ __ Or(input_high, input_high, Operand(input_low));
+ // Restore sign if necessary.
+ __ mov(scratch, sign);
+ result_reg = sign;
+ sign = no_reg;
+ __ Sub_w(result_reg, zero_reg, input_high);
+ __ Movz(result_reg, input_high, scratch);
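+  // In effect: result = (sign != 0) ? -input_high : input_high; the Movz
+  // replaces the negated value with input_high when the sign bit was clear.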
+
+ __ bind(&done);
+
+ __ St_d(result_reg, MemOperand(sp, kArgumentOffset));
+ __ Pop(scratch, scratch2, scratch3);
+ __ Pop(result_reg);
+ __ Ret();
+}
+
+namespace {
+
+int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ int64_t offset = (ref0.address() - ref1.address());
+ DCHECK(static_cast<int>(offset) == offset);
+ return static_cast<int>(offset);
+}
+
+// Calls an API function. Allocates a HandleScope, extracts the returned value
+// from the handle and propagates exceptions. Restores the context. stack_space
+// is the space to be unwound on exit (it includes the call JS arguments space
+// and the additional space allocated for the fast call).
+void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
+ ExternalReference thunk_ref, int stack_space,
+ MemOperand* stack_space_operand,
+ MemOperand return_value_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
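+  // kLimitOffset and kLevelOffset are the byte offsets of the HandleScope
+  // limit and level fields relative to the "next" field, whose address is
+  // loaded into s5 below.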
+
+ DCHECK(function_address == a1 || function_address == a2);
+
+ Label profiler_enabled, end_profiler_check;
+ __ li(t7, ExternalReference::is_profiling_address(isolate));
+ __ Ld_b(t7, MemOperand(t7, 0));
+ __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg));
+ __ li(t7, ExternalReference::address_of_runtime_stats_flag());
+ __ Ld_w(t7, MemOperand(t7, 0));
+ __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg));
+ {
+ // Call the api function directly.
+ __ mov(t7, function_address);
+ __ Branch(&end_profiler_check);
+ }
+
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ li(t7, thunk_ref);
+ }
+ __ bind(&end_profiler_check);
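+  // t7 now holds either the raw API function address (profiling and runtime
+  // stats disabled) or the thunk, which receives the real callback address
+  // as an additional parameter.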
+
+ // Allocate HandleScope in callee-save registers.
+ __ li(s5, next_address);
+ __ Ld_d(s0, MemOperand(s5, kNextOffset));
+ __ Ld_d(s1, MemOperand(s5, kLimitOffset));
+ __ Ld_w(s2, MemOperand(s5, kLevelOffset));
+ __ Add_w(s2, s2, Operand(1));
+ __ St_w(s2, MemOperand(s5, kLevelOffset));
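+  // This mirrors the C++ HandleScope constructor: remember the current next
+  // and limit values (s0, s1) and bump the level, so the scope can be torn
+  // down after the callback returns.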
+
+ __ StoreReturnAddressAndCall(t7);
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ __ Ld_d(a0, return_value_operand);
+ __ bind(&return_value_loaded);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ St_d(s0, MemOperand(s5, kNextOffset));
+ if (FLAG_debug_code) {
+ __ Ld_w(a1, MemOperand(s5, kLevelOffset));
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
+ Operand(s2));
+ }
+ __ Sub_w(s2, s2, Operand(1));
+ __ St_w(s2, MemOperand(s5, kLevelOffset));
+ __ Ld_d(kScratchReg, MemOperand(s5, kLimitOffset));
+ __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
+
+ // Leave the API exit frame.
+ __ bind(&leave_exit_frame);
+
+ if (stack_space_operand == nullptr) {
+ DCHECK_NE(stack_space, 0);
+ __ li(s0, Operand(stack_space));
+ } else {
+ DCHECK_EQ(stack_space, 0);
+ __ Ld_d(s0, *stack_space_operand);
+ }
+
+ static constexpr bool kDontSaveDoubles = false;
+ static constexpr bool kRegisterContainsSlotCount = false;
+ __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
+ kRegisterContainsSlotCount);
+
+ // Check if the function scheduled an exception.
+ __ LoadRoot(a4, RootIndex::kTheHoleValue);
+ __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
+ __ Ld_d(a5, MemOperand(kScratchReg, 0));
+ __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
+
+ __ Ret();
+
+ // Re-throw by promoting a scheduled exception.
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ St_d(s1, MemOperand(s5, kLimitOffset));
+ __ mov(s0, a0);
+ __ PrepareCallCFunction(1, s1);
+ __ li(a0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
+ __ mov(a0, s0);
+ __ jmp(&leave_exit_frame);
+}
+
+} // namespace
+
+void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- a1 : api function address
+ // -- a2 : arguments count (not including the receiver)
+ // -- a3 : call data
+ // -- a0 : holder
+ // -- sp[0] : receiver
+ // -- sp[8] : first argument
+ // -- ...
+ // -- sp[(argc) * 8] : last argument
+ // -----------------------------------
+
+ Register api_function_address = a1;
+ Register argc = a2;
+ Register call_data = a3;
+ Register holder = a0;
+ Register scratch = t0;
+ Register base = t1; // For addressing MemOperands on the stack.
+
+ DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch,
+ base));
+
+ using FCA = FunctionCallbackArguments;
+
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+
+ // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
+ //
+ // Target state:
+ // sp[0 * kPointerSize]: kHolder
+ // sp[1 * kPointerSize]: kIsolate
+ // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kPointerSize]: undefined (kReturnValue)
+ // sp[4 * kPointerSize]: kData
+ // sp[5 * kPointerSize]: undefined (kNewTarget)
+
+ // Set up the base register for addressing through MemOperands. It will point
+ // at the receiver (located at sp + argc * kPointerSize).
+ __ Alsl_d(base, argc, sp, kPointerSizeLog2, t7);
+
+ // Reserve space on the stack.
+ __ Sub_d(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
+
+ // kHolder.
+ __ St_d(holder, MemOperand(sp, 0 * kPointerSize));
+
+ // kIsolate.
+ __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // kReturnValueDefaultValue and kReturnValue.
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ St_d(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ St_d(scratch, MemOperand(sp, 3 * kPointerSize));
+
+ // kData.
+ __ St_d(call_data, MemOperand(sp, 4 * kPointerSize));
+
+ // kNewTarget.
+ __ St_d(scratch, MemOperand(sp, 5 * kPointerSize));
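+  // The six stores above fill FunctionCallbackInfo's implicit_args in the
+  // order required by FunctionCallbackArguments, from kHolderIndex at sp[0]
+  // up to kNewTargetIndex at sp[5].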
+
+ // Keep a pointer to kHolder (= implicit_args) in a scratch register.
+ // We use it below to set up the FunctionCallbackInfo object.
+ __ mov(scratch, sp);
+
+ // Allocate the v8::Arguments structure in the arguments' space since
+ // it's not controlled by GC.
+ static constexpr int kApiStackSpace = 4;
+ static constexpr bool kDontSaveDoubles = false;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+
+ // EnterExitFrame may align the sp.
+
+ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
+ // Arguments are after the return address (pushed by EnterExitFrame()).
+ __ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // FunctionCallbackInfo::values_ (points at the first varargs argument passed
+ // on the stack).
+ __ Add_d(scratch, scratch,
+ Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
+
+ __ St_d(scratch, MemOperand(sp, 2 * kPointerSize));
+
+ // FunctionCallbackInfo::length_.
+ // Stored as int field, 32-bit integers within struct on stack always left
+ // justified by n64 ABI.
+ __ St_w(argc, MemOperand(sp, 3 * kPointerSize));
+
+ // We also store the number of bytes to drop from the stack after returning
+ // from the API function here.
+ // Note: Unlike on other architectures, this stores the number of slots to
+ // drop, not the number of bytes.
+ __ Add_d(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */));
+ __ St_d(scratch, MemOperand(sp, 4 * kPointerSize));
+
+ // v8::InvocationCallback's argument.
+ DCHECK(!AreAliased(api_function_address, scratch, a0));
+ __ Add_d(a0, sp, Operand(1 * kPointerSize));
+
+ ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
+
+ // There are two stack slots above the arguments we constructed on the stack.
+ // TODO(jgruber): Document what these arguments are.
+ static constexpr int kStackSlotsAboveFCA = 2;
+ MemOperand return_value_operand(
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+
+ static constexpr int kUseStackSpaceOperand = 0;
+ MemOperand stack_space_operand(sp, 4 * kPointerSize);
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kUseStackSpaceOperand, &stack_space_operand,
+ return_value_operand);
+}
+
+void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = a4;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ Register api_function_address = a2;
+
+ // Here and below +1 is for name() pushed after the args_ array.
+ using PCA = PropertyCallbackArguments;
+ __ Sub_d(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
+ __ St_d(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
+ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ St_d(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ St_d(scratch,
+ MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ __ St_d(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+ kPointerSize));
+ __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ St_d(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+ __ St_d(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+ // should_throw_on_error -> false
+ DCHECK_EQ(0, Smi::zero().ptr());
+ __ St_d(zero_reg,
+ MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
+ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ St_d(scratch, MemOperand(sp, 0 * kPointerSize));
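+  // Stack layout at this point: the name handle at sp[0], followed by the
+  // PropertyCallbackArguments::args_ array in index order, from
+  // kShouldThrowOnErrorIndex up to kThisIndex.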
+
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
+ __ mov(a0, sp); // a0 = Handle<Name>
+ __ Add_d(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+  // Create the v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
+ __ St_d(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Add_d(a1, sp, Operand(1 * kPointerSize));
+ // a1 = v8::PropertyCallbackInfo&
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+
+ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ Ld_d(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ MemOperand* const kUseStackSpaceConstant = nullptr;
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, kUseStackSpaceConstant,
+ return_value_operand);
+}
+
+void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
+ // The sole purpose of DirectCEntry is for movable callers (e.g. any general
+ // purpose Code object) to be able to call into C functions that may trigger
+ // GC and thus move the caller.
+ //
+ // DirectCEntry places the return address on the stack (updated by the GC),
+ // making the call GC safe. The irregexp backend relies on this.
+
+ __ St_d(ra, MemOperand(sp, 0)); // Store the return address.
+ __ Call(t7); // Call the C++ function.
+ __ Ld_d(ra, MemOperand(sp, 0)); // Return to calling code.
+
+ // TODO(LOONG_dev): LOONG64 Check this assert.
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ // In case of an error the return address may point to a memory area
+ // filled with kZapValue by the GC. Dereference the address and check for
+ // this.
+ __ Ld_d(a4, MemOperand(ra, 0));
+ __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
+ Operand(reinterpret_cast<uint64_t>(kZapValue)));
+ }
+
+ __ Jump(ra);
+}
+
+namespace {
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Unlike on ARM we don't save all the registers, just the useful ones.
+ // For the rest, there are gaps on the stack, so the offsets remain the same.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+ RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+
+ // Save all double FPU registers before messing with them.
+ __ Sub_d(sp, sp, Operand(kDoubleRegsSize));
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ Fst_d(fpu_reg, MemOperand(sp, offset));
+ }
+
+ // Push saved_regs (needed to populate FrameDescription::registers_).
+ // Leave gaps for other registers.
+ __ Sub_d(sp, sp, kNumberOfRegisters * kPointerSize);
+ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+ if ((saved_regs & (1 << i)) != 0) {
+ __ St_d(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ }
+ }
+
+ __ li(a2,
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
+ __ St_d(fp, MemOperand(a2, 0));
+
+ const int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
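+  // kSavedRegistersAreaSize covers both blocks pushed above: kNumberOfRegisters
+  // GP slots (with gaps for registers that were not saved) directly at sp,
+  // followed by the FPU register doubles above them.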
+
+ __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
+  // Get the address of the location in the code object (the return address
+  // for lazy deoptimization) into a3 and compute the fp-to-sp delta in
+  // register a4.
+ __ mov(a3, ra);
+ __ Add_d(a4, sp, Operand(kSavedRegistersAreaSize));
+
+ __ sub_d(a4, fp, a4);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6, a5);
+ // Pass six arguments, according to n64 ABI.
+ __ mov(a0, zero_reg);
+ Label context_check;
+ __ Ld_d(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(a1, &context_check);
+ __ Ld_d(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ li(a1, Operand(static_cast<int>(deopt_kind)));
+ // a2: bailout id already loaded.
+ // a3: code address or 0 already loaded.
+ // a4: already has fp-to-sp delta.
+ __ li(a5, ExternalReference::isolate_address(isolate));
+
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve "deoptimizer" object in register a0 and get the input
+ // frame descriptor pointer to a1 (deoptimizer->input_);
+ // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
+ __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((saved_regs & (1 << i)) != 0) {
+ __ Ld_d(a2, MemOperand(sp, i * kPointerSize));
+ __ St_d(a2, MemOperand(a1, offset));
+ } else if (FLAG_debug_code) {
+ __ li(a2, Operand(kDebugZapValue));
+ __ St_d(a2, MemOperand(a1, offset));
+ }
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ Fld_d(f0, MemOperand(sp, src_offset));
+ __ Fst_d(f0, MemOperand(a1, dst_offset));
+ }
+
+ // Remove the saved registers from the stack.
+ __ Add_d(sp, sp, Operand(kSavedRegistersAreaSize));
+
+ // Compute a pointer to the unwinding limit in register a2; that is
+ // the first stack slot not part of the input frame.
+ __ Ld_d(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+ __ add_d(a2, a2, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Add_d(a3, a1, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ Branch(&pop_loop_header);
+ __ bind(&pop_loop);
+ __ Pop(a4);
+ __ St_d(a4, MemOperand(a3, 0));
+ __ addi_d(a3, a3, sizeof(uint64_t));
+ __ bind(&pop_loop_header);
+ __ BranchShort(&pop_loop, ne, a2, Operand(sp));
+ // Compute the output frame in the deoptimizer.
+ __ Push(a0); // Preserve deoptimizer object across call.
+ // a0: deoptimizer object; a1: scratch.
+ __ PrepareCallCFunction(1, a1);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ Pop(a0); // Restore deoptimizer object (class Deoptimizer).
+
+ __ Ld_d(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: a4 = current "FrameDescription** output_",
+ // a1 = one past the last FrameDescription**.
+ __ Ld_w(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+ __ Ld_d(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
+ __ Alsl_d(a1, a1, a4, kPointerSizeLog2);
+ __ Branch(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+ __ Ld_d(a2, MemOperand(a4, 0)); // output_[ix]
+ __ Ld_d(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ Branch(&inner_loop_header);
+ __ bind(&inner_push_loop);
+ __ Sub_d(a3, a3, Operand(sizeof(uint64_t)));
+ __ Add_d(a6, a2, Operand(a3));
+ __ Ld_d(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
+ __ Push(a7);
+ __ bind(&inner_loop_header);
+ __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+ __ Add_d(a4, a4, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
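+  // The nested loops above rebuild each output frame on the machine stack by
+  // pushing its FrameDescription contents word by word, starting at the
+  // highest offset of frame_content_ and ending at offset 0.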
+
+ __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ Fld_d(fpu_reg, MemOperand(a1, src_offset));
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ Ld_d(a6, MemOperand(a2, FrameDescription::pc_offset()));
+ __ Push(a6);
+ __ Ld_d(a6, MemOperand(a2, FrameDescription::continuation_offset()));
+ __ Push(a6);
+
+ // Technically restoring 'at' should work unless zero_reg is also restored
+ // but it's safer to check for this.
+ DCHECK(!(t7.bit() & restored_regs));
+ // Restore the registers from the last output frame.
+ __ mov(t7, a2);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ Ld_d(ToRegister(i), MemOperand(t7, offset));
+ }
+ }
+
+ __ Pop(t7); // Get continuation, leave pc on stack.
+ __ Pop(ra);
+ __ Jump(t7);
+ __ stop();
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
+namespace {
+
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
+ Label start;
+ __ bind(&start);
+
+ // Get function from the frame.
+ Register closure = a1;
+ __ Ld_d(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+
+ // Get the Code object from the shared function info.
+ Register code_obj = s1;
+ __ Ld_d(code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_d(code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ GetObjectType(code_obj, t2, t2);
+ __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ GetObjectType(code_obj, t2, t2);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
+ }
+
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, t2);
+ }
+
+  // Load the feedback vector from the closure.
+ Register feedback_vector = a2;
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+  // Check if the feedback vector is valid. If not, call into the runtime at
+  // install_baseline_code below to allocate it.
+ __ GetObjectType(feedback_vector, t2, t2);
+ __ Branch(&install_baseline_code, ne, t2, Operand(FEEDBACK_VECTOR_TYPE));
+
+ // Save BytecodeOffset from the stack frame.
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ // Replace BytecodeOffset with the feedback vector.
+ __ St_d(feedback_vector,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ feedback_vector = no_reg;
+
+ // Compute baseline pc for bytecode offset.
+ ExternalReference get_baseline_pc_extref;
+ if (next_bytecode || is_osr) {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_next_executed_bytecode();
+ } else {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_bytecode_offset();
+ }
+
+ Register get_baseline_pc = a3;
+ __ li(get_baseline_pc, get_baseline_pc_extref);
+
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset.
+ // TODO(pthier): Investigate if it is feasible to handle this special case
+ // in TurboFan instead of here.
+ Label valid_bytecode_offset, function_entry_bytecode;
+ if (!is_osr) {
+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ }
+
+ __ Sub_d(kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeOffsetRegister,
+ (BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&valid_bytecode_offset);
+ // Get bytecode array from the stack frame.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ // Save the accumulator register, since it's clobbered by the below call.
+ __ Push(kInterpreterAccumulatorRegister);
+ {
+ Register arg_reg_1 = a0;
+ Register arg_reg_2 = a1;
+ Register arg_reg_3 = a2;
+ __ Move(arg_reg_1, code_obj);
+ __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
+ __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallCFunction(get_baseline_pc, 3, 0);
+ }
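+  // The C helper returns the pc offset of the target bytecode inside the
+  // baseline code; it is added to code_obj below, and the Code::kHeaderSize
+  // adjustment then yields the actual entry address.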
+ __ Add_d(code_obj, code_obj, kReturnRegister0);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ if (is_osr) {
+ // Reset the OSR loop nesting depth to disarm back edges.
+ // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+ // Sparkplug here.
+ // TODO(liuyu): Remove Ld as arm64 after register reallocation.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ St_h(zero_reg,
+ FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
+ Generate_OSREntry(masm, code_obj,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ Add_d(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(code_obj);
+ }
+ __ Trap(); // Unreachable.
+
+ if (!is_osr) {
+ __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
+    // address of the first bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister, zero_reg);
+ if (next_bytecode) {
+ __ li(get_baseline_pc,
+ ExternalReference::baseline_pc_for_bytecode_offset());
+ }
+ __ Branch(&valid_bytecode_offset);
+ }
+
+ __ bind(&install_baseline_code);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
+ __ Push(closure);
+ __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister);
+ }
+ // Retry from the start after installing baseline code.
+ __ Branch(&start);
+}
+
+} // namespace
+
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
+}
+
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
+}
+
+void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
+ Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
+ masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
+}
+
+void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
+ MacroAssembler* masm) {
+ Generate_DynamicCheckMapsTrampoline<
+ DynamicCheckMapsWithFeedbackVectorDescriptor>(
+ masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
+}
+
+template <class Descriptor>
+void Builtins::Generate_DynamicCheckMapsTrampoline(
+ MacroAssembler* masm, Handle<Code> builtin_target) {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+
+ // Only save the registers that the DynamicCheckMaps builtin can clobber.
+ Descriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
+  // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
+  // need to save all CallerSaved registers too.
+ if (FLAG_debug_code) registers |= kJSCallerSaved;
+ __ MaybeSaveRegisters(registers);
+
+ // Load the immediate arguments from the deopt exit to pass to the builtin.
+ Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
+ Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
+ __ Ld_d(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
+ __ Ld_d(
+ slot_arg,
+ MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
+ __ Ld_d(
+ handler_arg,
+ MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
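+  // The caller pc of this frame points into the deopt exit; the two loads
+  // above re-read the slot and handler immediates that are embedded in the
+  // code stream at fixed offsets from that pc.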
+ __ Call(builtin_target, RelocInfo::CODE_TARGET);
+
+ Label deopt, bailout;
+ __ Branch(&deopt, ne, a0,
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kSuccess)));
+
+ __ MaybeRestoreRegisters(registers);
+ __ LeaveFrame(StackFrame::INTERNAL);
+ __ Ret();
+
+ __ bind(&deopt);
+ __ Branch(&bailout, eq, a0,
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kBailout)));
+
+ if (FLAG_debug_code) {
+ __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus, a0,
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kDeopt)));
+ }
+ __ MaybeRestoreRegisters(registers);
+ __ LeaveFrame(StackFrame::INTERNAL);
+ Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
+ Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
+ __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
+
+ __ bind(&bailout);
+ __ MaybeRestoreRegisters(registers);
+ __ LeaveFrame(StackFrame::INTERNAL);
+ Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
+ Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
+ __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 8f4bf4d06b..9a97f0fa4e 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -612,6 +612,16 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ lw(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+ Operand(static_cast<int>(CodeKind::BASELINE)));
+}
+
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
@@ -620,7 +630,15 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
- __ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE));
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ Branch(is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+ }
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
__ lw(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
@@ -1389,8 +1407,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ Lw(a2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(a2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t4, t5);
__ JumpCodeObject(a2);
@@ -1779,7 +1796,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
}
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2723,12 +2741,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
@@ -3964,7 +3976,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (!is_osr) {
Label start_with_baseline;
__ GetObjectType(code_obj, t6, t6);
- __ Branch(&start_with_baseline, eq, t6, Operand(BASELINE_DATA_TYPE));
+ __ Branch(&start_with_baseline, eq, t6, Operand(CODET_TYPE));
// Start with bytecode as there is no baseline code.
Builtin builtin_id = next_bytecode
@@ -3977,12 +3989,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
__ GetObjectType(code_obj, t6, t6);
- __ Assert(eq, AbortReason::kExpectedBaselineData, t6,
- Operand(BASELINE_DATA_TYPE));
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t6, Operand(CODET_TYPE));
}
- // Load baseline code from baseline data.
- __ Lw(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, t2);
+ }
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 45e1c32f82..3f8824d97d 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -300,6 +300,16 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ Ld(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+ Operand(static_cast<int>(CodeKind::BASELINE)));
+}
+
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
// the more general dispatch.
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
@@ -309,11 +319,18 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
- __ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE));
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ Branch(is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+ }
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
__ Ld(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
-
__ bind(&done);
}
@@ -1402,8 +1419,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ Ld(a2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(a2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t0, t1);
__ JumpCodeObject(a2);
@@ -1788,7 +1804,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
}
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ Ld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ Ld(a1, MemOperand(v0, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2814,12 +2831,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
@@ -3549,7 +3560,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (!is_osr) {
Label start_with_baseline;
__ GetObjectType(code_obj, t2, t2);
- __ Branch(&start_with_baseline, eq, t2, Operand(BASELINE_DATA_TYPE));
+ __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
// Start with bytecode as there is no baseline code.
Builtin builtin_id = next_bytecode
@@ -3562,12 +3573,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
__ GetObjectType(code_obj, t2, t2);
- __ Assert(eq, AbortReason::kExpectedBaselineData, t2,
- Operand(BASELINE_DATA_TYPE));
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
}
- // Load baseline code from baseline data.
- __ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, t2);
+ }
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 02b76175ec..4c2533e68d 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -1641,7 +1641,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
- r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset), r0);
+ r4, FieldMemOperand(r3, Code::kDeoptimizationDataOrInterpreterDataOffset),
+ r0);
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
@@ -2646,12 +2647,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
index f79e392f48..c90352bea1 100644
--- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -320,6 +320,15 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ Ld(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+ Operand(static_cast<int>(CodeKind::BASELINE)));
+}
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
// the more general dispatch.
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
@@ -330,7 +339,8 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
- __ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE));
+ __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE),
Label::Distance::kNear);
__ LoadTaggedPointerField(
@@ -401,17 +411,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Lhu(a3,
FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
__ LoadTaggedPointerField(
- scratch,
+ t1,
FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
__ bind(&loop);
__ Sub64(a3, a3, Operand(1));
__ Branch(&done_loop, lt, a3, Operand(zero_reg), Label::Distance::kNear);
- __ CalcScaledAddress(kScratchReg, scratch, a3, kTaggedSizeLog2);
+ __ CalcScaledAddress(kScratchReg, t1, a3, kTaggedSizeLog2);
__ LoadAnyTaggedField(
kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
__ Push(kScratchReg);
@@ -575,9 +583,14 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ li(s3, Operand(StackFrame::TypeToMarker(type)));
ExternalReference c_entry_fp = ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, masm->isolate());
- __ li(s4, c_entry_fp);
- __ Ld(s4, MemOperand(s4));
+ __ li(s5, c_entry_fp);
+ __ Ld(s4, MemOperand(s5));
__ Push(s1, s2, s3, s4);
+ // Clear c_entry_fp, now we've pushed its previous value to the stack.
+ // If the c_entry_fp is not already zero and we don't clear it, the
+ // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+ // frames on top.
+ __ Sd(zero_reg, MemOperand(s5));
// Set up frame pointer for the frame to be pushed.
__ Add64(fp, sp, -EntryFrameConstants::kCallerFPOffset);
// Registers:
@@ -1160,9 +1173,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// store the bytecode offset.
if (FLAG_debug_code) {
UseScratchRegisterScope temps(masm);
- Register type = temps.Acquire();
- __ GetObjectType(feedback_vector, type, type);
- __ Assert(eq, AbortReason::kExpectedFeedbackVector, type,
+ Register invocation_count = temps.Acquire();
+ __ GetObjectType(feedback_vector, invocation_count, invocation_count);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
Operand(FEEDBACK_VECTOR_TYPE));
}
  // Our stack is currently aligned. We have to push something along with
@@ -1171,8 +1184,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
// `undefined` in the accumulator register, to skip the load in the baseline
// code.
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
- __ Push(feedback_vector, kInterpreterAccumulatorRegister);
+ __ Push(feedback_vector);
}
Label call_stack_guard;
@@ -1203,7 +1215,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// Drop the frame created by the baseline call.
- __ Pop(fp, ra);
+ __ Pop(ra, fp);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ Trap();
@@ -1212,14 +1224,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ bind(&call_stack_guard);
{
ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
- Register new_target = descriptor.GetRegisterParameter(
- BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
-
FrameScope frame_scope(masm, StackFrame::INTERNAL);
// Save incoming new target or generator
- __ Push(zero_reg, new_target);
- __ CallRuntime(Runtime::kStackGuard);
- __ Pop(new_target, zero_reg);
+ __ Push(kJavaScriptCallNewTargetRegister);
+ __ SmiTag(frame_size);
+ __ Push(frame_size);
+ __ CallRuntime(Runtime::kStackGuardWithGap);
+ __ Pop(kJavaScriptCallNewTargetRegister);
}
__ Ret();
temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
@@ -1239,7 +1250,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// o ra: return address
//
// The function builds an interpreter frame. See InterpreterFrameConstants in
-// frames.h for its layout.
+// frame-constants.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = a1;
Register feedback_vector = a2;
@@ -1466,36 +1477,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
- __ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ld(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
- __ Ld(scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
- __ Lh(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ LoadTaggedPointerField(
+ scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ Branch(&install_baseline_code, ne, scratch,
Operand(FEEDBACK_VECTOR_TYPE));
- // Read off the optimization state in the feedback vector.
- // TODO(v8:11429): Is this worth doing here? Baseline code will check it
- // anyway...
- __ Ld(optimization_state,
- FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-
- // Check if there is optimized code or a optimization marker that needes to
- // be processed.
- __ And(
- scratch, optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
- __ Branch(&has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+ // Check for an optimization marker.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ LoadTaggedPointerField(
- a2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(a2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, scratch, scratch2);
__ JumpCodeObject(a2);
@@ -1888,7 +1891,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
- a1, MemOperand(a0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ a1, MemOperand(a0, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2713,6 +2717,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in t0 by the jump table trampoline.
// Convert to Smi for the runtime call
@@ -2728,7 +2733,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
for (Register gp_param_reg : wasm::kGpParamRegisters) {
gp_regs |= gp_param_reg.bit();
}
- // Also push x1, because we must push multiples of 16 bytes (see
+ // Also push a1, because we must push multiples of 16 bytes (see
  // {TurboAssembler::PushCPURegList}).
CHECK_EQ(0, NumRegs(gp_regs) % 2);
@@ -2786,6 +2791,7 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
}
__ Ret();
}
+#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
@@ -2909,12 +2915,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Compute the handler entry address and jump to it.
UseScratchRegisterScope temp(masm);
Register scratch = temp.Acquire();
@@ -3640,7 +3640,6 @@ namespace {
void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
bool next_bytecode,
bool is_osr = false) {
- __ Push(zero_reg, kInterpreterAccumulatorRegister);
Label start;
__ bind(&start);
@@ -3649,7 +3648,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
// Get the Code object from the shared function info.
- Register code_obj = a4;
+ Register code_obj = s1;
__ LoadTaggedPointerField(
code_obj,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -3664,10 +3663,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ GetObjectType(code_obj, scratch, scratch);
- __ Branch(&start_with_baseline, eq, scratch, Operand(BASELINE_DATA_TYPE));
+ __ Branch(&start_with_baseline, eq, scratch, Operand(CODET_TYPE));
// Start with bytecode as there is no baseline code.
- __ Pop(zero_reg, kInterpreterAccumulatorRegister);
Builtin builtin_id = next_bytecode
? Builtin::kInterpreterEnterAtNextBytecode
: Builtin::kInterpreterEnterAtBytecode;
@@ -3681,13 +3679,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register scratch = temps.Acquire();
__ GetObjectType(code_obj, scratch, scratch);
__ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
- Operand(BASELINE_DATA_TYPE));
+ Operand(CODET_TYPE));
+ }
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ AssertCodeIsBaseline(masm, code_obj, scratch);
}
-
- // Load baseline code from baseline data.
- __ LoadTaggedPointerField(
- code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
__ LoadTaggedPointerField(
@@ -3701,7 +3699,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register type = temps.Acquire();
__ GetObjectType(feedback_vector, type, type);
- __ Branch(&install_baseline_code, eq, type, Operand(FEEDBACK_VECTOR_TYPE));
+ __ Branch(&install_baseline_code, ne, type, Operand(FEEDBACK_VECTOR_TYPE));
// Save BytecodeOffset from the stack frame.
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
@@ -3711,7 +3709,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
feedback_vector = no_reg;
// Compute baseline pc for bytecode offset.
- __ Push(zero_reg, kInterpreterAccumulatorRegister);
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
get_baseline_pc_extref =
@@ -3744,6 +3741,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Get bytecode array from the stack frame.
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Push(kInterpreterAccumulatorRegister);
{
Register arg_reg_1 = a0;
Register arg_reg_2 = a1;
@@ -3755,13 +3753,15 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ CallCFunction(get_baseline_pc, 3, 0);
}
__ Add64(code_obj, code_obj, kReturnRegister0);
- __ Pop(kInterpreterAccumulatorRegister, zero_reg);
+ __ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
// Reset the OSR loop nesting depth to disarm back edges.
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
- __ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -3786,8 +3786,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ Branch(&start);
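The JSEntry change above saves the previous c_entry_fp, pushes it, and then zeroes the slot so the SafeStackFrameIterator does not mistake the topmost frames for C++ while JavaScript is executing; the value is restored when the entry frame is torn down. A small standalone model of that save/clear/restore protocol (names and types are stand-ins, not V8's):

#include <cstdint>
#include <iostream>

static uintptr_t c_entry_fp = 0;  // stand-in for the isolate's c_entry_fp slot

// A frame iterator that sees a non-zero c_entry_fp assumes C++ is on top.
bool LooksLikeCppOnTop() { return c_entry_fp != 0; }

void EnterJs(void (*js_body)()) {
  uintptr_t saved = c_entry_fp;  // __ Ld(s4, MemOperand(s5)); __ Push(..., s4);
  c_entry_fp = 0;                // __ Sd(zero_reg, MemOperand(s5));
  js_body();                     // while here, iterators report "in JS"
  c_entry_fp = saved;            // restored when the entry frame is left
}

int main() {
  c_entry_fp = 0x1234;  // pretend we arrived here through CEntry
  EnterJs([] { std::cout << std::boolalpha << LooksLikeCppOnTop() << "\n"; });  // false
  std::cout << std::boolalpha << LooksLikeCppOnTop() << "\n";                   // true
}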
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 5129cc6ee3..5ee2cf7c6a 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -1681,7 +1681,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
- r3, FieldMemOperand(r2, Code::kDeoptimizationDataOffset));
+ r3,
+ FieldMemOperand(r2, Code::kDeoptimizationDataOrInterpreterDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2679,12 +2680,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index 2724f9a200..5ad0319f63 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -156,11 +156,11 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, Builtin builtin,
CanonicalHandleScope canonical(isolate);
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
- const int argc_with_recv =
- (argc == kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
- compiler::CodeAssemblerState state(
- isolate, &zone, argc_with_recv, CodeKind::BUILTIN, name,
- PoisoningMitigationLevel::kDontPoison, builtin);
+ const int argc_with_recv = (argc == kDontAdaptArgumentsSentinel)
+ ? 0
+ : argc + (kJSArgcIncludesReceiver ? 0 : 1);
+ compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv,
+ CodeKind::BUILTIN, name, builtin);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
&state, BuiltinAssemblerOptions(isolate, builtin),
@@ -183,9 +183,8 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,
CallInterfaceDescriptor descriptor(interface_descriptor);
// Ensure descriptor is already initialized.
DCHECK_LE(0, descriptor.GetRegisterParameterCount());
- compiler::CodeAssemblerState state(
- isolate, &zone, descriptor, CodeKind::BUILTIN, name,
- PoisoningMitigationLevel::kDontPoison, builtin);
+ compiler::CodeAssemblerState state(isolate, &zone, descriptor,
+ CodeKind::BUILTIN, name, builtin);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
&state, BuiltinAssemblerOptions(isolate, builtin),
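The change above folds the receiver into the builtin's parameter count only when kJSArgcIncludesReceiver is not set, and drops the PoisoningMitigationLevel argument now that the poisoning infrastructure is gone. A sketch of the argc computation, with assumed values for the constants:

#include <cassert>

// Stand-in values for illustration; the real constants come from V8's globals.
constexpr int kDontAdaptArgumentsSentinel = -1;  // assumed sentinel value
constexpr bool kJSArgcIncludesReceiver = false;  // build-time switch in V8

// Mirrors the argc_with_recv computation in BuildWithCodeStubAssemblerJS.
constexpr int ArgcWithReceiver(int argc) {
  return (argc == kDontAdaptArgumentsSentinel)
             ? 0
             : argc + (kJSArgcIncludesReceiver ? 0 : 1);
}

int main() {
  assert(ArgcWithReceiver(kDontAdaptArgumentsSentinel) == 0);
  assert(ArgcWithReceiver(2) == 3);  // 2 declared parameters plus the receiver slot
}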
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index 2f94f6205f..cb3443284d 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -62,7 +62,6 @@ transitioning macro AllocateTypedArray(implicit context: Context)(
typedArray.bit_field.is_length_tracking = isLengthTracking;
typedArray.bit_field.is_backed_by_rab =
IsResizableArrayBuffer(buffer) && !IsSharedArrayBuffer(buffer);
- typed_array::AllocateJSTypedArrayExternalPointerEntry(typedArray);
if constexpr (isOnHeap) {
typed_array::SetJSTypedArrayOnHeapDataPtr(typedArray, elements, byteOffset);
} else {
diff --git a/deps/v8/src/builtins/typed-array-every.tq b/deps/v8/src/builtins/typed-array-every.tq
index fdd4961dee..8c662bffb7 100644
--- a/deps/v8/src/builtins/typed-array-every.tq
+++ b/deps/v8/src/builtins/typed-array-every.tq
@@ -7,24 +7,43 @@
namespace typed_array {
const kBuiltinNameEvery: constexpr string = '%TypedArray%.prototype.every';
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every
transitioning macro EveryAllElements(implicit context: Context)(
array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
thisArg: JSAny): Boolean {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
+
+ // 6c. Let testResult be ! ToBoolean(? Call(callbackfn, thisArg, « kValue,
+ // 𝔽(k), O »)).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
const result = Call(
context, callbackfn, thisArg, value, Convert<Number>(k),
witness.GetStable());
+ // 6d. If testResult is false, return false.
if (!ToBoolean(result)) {
return False;
}
+ // 6e. Set k to k + 1. (done by the loop).
}
+
+ // 7. Return true.
return True;
}
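The rewritten loop above, and the same pattern in the find, findIndex, forEach, some, reduce, and reduceRight changes that follow, keeps iterating after the backing buffer is detached: a failed Recheck now routes to a deferred label that substitutes undefined for the element instead of silently breaking out as the old BUG(4895) comment described. A standalone model of that behavior (not V8 code):

#include <functional>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// nullopt plays the role of undefined; "detached" collapses every later load.
using Value = std::optional<int>;

bool EveryAllElements(const std::vector<int>& elements, bool* detached,
                      const std::function<bool(Value, size_t)>& callback) {
  const size_t length = elements.size();  // len is captured once, up front
  for (size_t k = 0; k < length; k++) {
    // witness.Recheck() otherwise goto IsDetached; value = witness.Load(k);
    Value value = *detached ? Value{} : Value{elements[k]};
    if (!callback(value, k)) return false;  // 6d. testResult is false
  }
  return true;  // 7. Return true.
}

int main() {
  std::vector<int> data{1, 2, 3};
  bool detached = false;
  bool result = EveryAllElements(data, &detached, [&](Value v, size_t k) {
    if (k == 1) detached = true;  // simulate a detach inside the callback
    std::cout << k << ": " << (v ? std::to_string(*v) : "undefined") << "\n";
    return true;                  // keep iterating
  });
  std::cout << std::boolalpha << result << "\n";  // true; the loop ran to the end
}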
diff --git a/deps/v8/src/builtins/typed-array-filter.tq b/deps/v8/src/builtins/typed-array-filter.tq
index 15d40f92eb..18fbce9f09 100644
--- a/deps/v8/src/builtins/typed-array-filter.tq
+++ b/deps/v8/src/builtins/typed-array-filter.tq
@@ -38,11 +38,15 @@ transitioning javascript builtin TypedArrayPrototypeFilter(
// 8. Let captured be 0.
// 9. Repeat, while k < len
for (let k: uintptr = 0; k < len; k++) {
- witness.Recheck() otherwise IsDetached;
-
+ let value: JSAny;
// a. Let Pk be ! ToString(k).
// b. Let kValue be ? Get(O, Pk).
- const value: JSAny = witness.Load(k);
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
// c. Let selected be ToBoolean(? Call(callbackfn, T, « kValue, k, O
// »)).
@@ -57,7 +61,7 @@ transitioning javascript builtin TypedArrayPrototypeFilter(
// ii. Increase captured by 1.
if (ToBoolean(selected)) kept.Push(value);
- // e.Increase k by 1.
+ // e. Increase k by 1. (done by the loop)
}
// 10. Let A be ? TypedArraySpeciesCreate(O, captured).
diff --git a/deps/v8/src/builtins/typed-array-find.tq b/deps/v8/src/builtins/typed-array-find.tq
index 24a13dbc23..b37b4ef8a9 100644
--- a/deps/v8/src/builtins/typed-array-find.tq
+++ b/deps/v8/src/builtins/typed-array-find.tq
@@ -7,24 +7,45 @@
namespace typed_array {
const kBuiltinNameFind: constexpr string = '%TypedArray%.prototype.find';
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.find
transitioning macro FindAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ array: typed_array::AttachedJSTypedArray, predicate: Callable,
thisArg: JSAny): JSAny {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
+
+ // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
const result = Call(
- context, callbackfn, thisArg, value, Convert<Number>(k),
+ context, predicate, thisArg, value, Convert<Number>(k),
witness.GetStable());
+
+ // 6d. If testResult is true, return kValue.
if (ToBoolean(result)) {
return value;
}
+
+ // 6e. Set k to k + 1. (done by the loop).
}
+
+ // 7. Return undefined.
return Undefined;
}
@@ -39,9 +60,9 @@ TypedArrayPrototypeFind(
otherwise NotTypedArray;
const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- return FindAllElements(uarray, callbackfn, thisArg);
+ return FindAllElements(uarray, predicate, thisArg);
} label NotCallable deferred {
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
} label NotTypedArray deferred {
diff --git a/deps/v8/src/builtins/typed-array-findindex.tq b/deps/v8/src/builtins/typed-array-findindex.tq
index 7bb01151f3..aede90dc7f 100644
--- a/deps/v8/src/builtins/typed-array-findindex.tq
+++ b/deps/v8/src/builtins/typed-array-findindex.tq
@@ -9,19 +9,33 @@ const kBuiltinNameFindIndex: constexpr string =
'%TypedArray%.prototype.findIndex';
transitioning macro FindIndexAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ array: typed_array::AttachedJSTypedArray, predicate: Callable,
thisArg: JSAny): Number {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
+
+ // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
const indexNumber: Number = Convert<Number>(k);
const result = Call(
- context, callbackfn, thisArg, value, indexNumber, witness.GetStable());
+ context, predicate, thisArg, value, indexNumber, witness.GetStable());
if (ToBoolean(result)) {
return indexNumber;
}
@@ -40,9 +54,9 @@ TypedArrayPrototypeFindIndex(
otherwise NotTypedArray;
const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- return FindIndexAllElements(uarray, callbackfn, thisArg);
+ return FindIndexAllElements(uarray, predicate, thisArg);
} label NotCallable deferred {
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
} label NotTypedArray deferred {
diff --git a/deps/v8/src/builtins/typed-array-findlast.tq b/deps/v8/src/builtins/typed-array-findlast.tq
index 634e17b936..15f67760c0 100644
--- a/deps/v8/src/builtins/typed-array-findlast.tq
+++ b/deps/v8/src/builtins/typed-array-findlast.tq
@@ -8,56 +8,28 @@ namespace typed_array {
const kBuiltinNameFindLast: constexpr string =
'%TypedArray%.prototype.findLast';
-// Continuation part of
-// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast
-// when array buffer was detached.
-transitioning builtin FindLastAllElementsDetachedContinuation(
- implicit context: Context)(
- array: JSTypedArray, predicate: Callable, thisArg: JSAny,
- initialK: Number): JSAny {
- // 6. Repeat, while k ≥ 0
- for (let k: Number = initialK; k >= 0; k--) {
- // 6a. Let Pk be ! ToString(𝔽(k)).
- // there is no need to cast ToString to load elements.
-
- // 6b. Let kValue be ! Get(O, Pk).
- // kValue must be undefined when the buffer was detached.
-
- // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
- // 𝔽(k), O »)).
- // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
- // indices to optimize Convert<Number>(k) for the most common case.
- const result =
- Call(context, predicate, thisArg, Undefined, Convert<Number>(k), array);
- // 6d. If testResult is true, return kValue.
- if (ToBoolean(result)) {
- return Undefined;
- }
-
- // 6e. Set k to k - 1. (done by the loop).
- }
-
- // 7. Return undefined.
- return Undefined;
-}
-
// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast
transitioning macro FindLastAllElements(implicit context: Context)(
array: typed_array::AttachedJSTypedArray, predicate: Callable,
- thisArg: JSAny): JSAny labels
-Bailout(Number) {
+ thisArg: JSAny): JSAny {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
// 3. Let len be O.[[ArrayLength]].
const length: uintptr = witness.Get().length;
// 5. Let k be len - 1.
// 6. Repeat, while k ≥ 0
for (let k: uintptr = length; k-- > 0;) {
- witness.Recheck() otherwise goto Bailout(Convert<Number>(k));
// 6a. Let Pk be ! ToString(𝔽(k)).
- // there is no need to cast ToString to load elements.
+ // There is no need to cast ToString to load elements.
// 6b. Let kValue be ! Get(O, Pk).
- const value: JSAny = witness.Load(k);
+ // kValue must be undefined when the buffer was detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
// 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
// 𝔽(k), O »)).
@@ -94,13 +66,7 @@ TypedArrayPrototypeFindLast(
// 4. If IsCallable(predicate) is false, throw a TypeError exception.
const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- try {
- return FindLastAllElements(uarray, predicate, thisArg)
- otherwise Bailout;
- } label Bailout(k: Number) deferred {
- return FindLastAllElementsDetachedContinuation(
- uarray, predicate, thisArg, k);
- }
+ return FindLastAllElements(uarray, predicate, thisArg);
} label NotCallable deferred {
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
} label NotTypedArray deferred {
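findLast above (and findLastIndex below) now handles detachment inline, so the separate detached-continuation builtin is gone; the loop walks from len-1 down to 0 with the unsigned `k-- > 0` countdown idiom, which decrements before the body runs and therefore never wraps below zero. A plain C++ illustration of the idiom:

#include <cstddef>
#include <iostream>
#include <vector>

// k starts at length and is decremented in the condition, so the body sees
// length-1, length-2, ..., 0 and the unsigned index never underflows.
int FindLastIndex(const std::vector<int>& values, int needle) {
  for (size_t k = values.size(); k-- > 0;) {
    if (values[k] == needle) return static_cast<int>(k);
  }
  return -1;  // 7. Return -1 (as in the spec steps quoted above).
}

int main() {
  std::vector<int> v{7, 3, 7, 5};
  std::cout << FindLastIndex(v, 7) << "\n";  // 2, the last occurrence
  std::cout << FindLastIndex(v, 9) << "\n";  // -1
}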
diff --git a/deps/v8/src/builtins/typed-array-findlastindex.tq b/deps/v8/src/builtins/typed-array-findlastindex.tq
index 4b20114c91..56d139d8b1 100644
--- a/deps/v8/src/builtins/typed-array-findlastindex.tq
+++ b/deps/v8/src/builtins/typed-array-findlastindex.tq
@@ -8,57 +8,28 @@ namespace typed_array {
const kBuiltinNameFindLastIndex: constexpr string =
    '%TypedArray%.prototype.findLastIndex';
-// Continuation part of
-// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex
-// when array buffer was detached.
-transitioning builtin FindLastIndexAllElementsDetachedContinuation(
- implicit context: Context)(
- array: JSTypedArray, predicate: Callable, thisArg: JSAny,
- initialK: Number): Number {
- // 6. Repeat, while k ≥ 0
- for (let k: Number = initialK; k >= 0; k--) {
- // 6a. Let Pk be ! ToString(𝔽(k)).
- // there is no need to cast ToString to load elements.
-
- // 6b. Let kValue be ! Get(O, Pk).
- // kValue must be undefined when the buffer was detached.
-
- // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
- // 𝔽(k), O »)).
- // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
- // indices to optimize Convert<Number>(k) for the most common case.
- const indexNumber: Number = Convert<Number>(k);
- const result =
- Call(context, predicate, thisArg, Undefined, indexNumber, array);
- // 6d. If testResult is true, return 𝔽(k).
- if (ToBoolean(result)) {
- return indexNumber;
- }
-
- // 6e. Set k to k - 1. (done by the loop).
- }
-
- // 7. Return -1𝔽.
- return -1;
-}
-
// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex
transitioning macro FindLastIndexAllElements(implicit context: Context)(
array: typed_array::AttachedJSTypedArray, predicate: Callable,
- thisArg: JSAny): Number labels
-Bailout(Number) {
+ thisArg: JSAny): Number {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
// 3. Let len be O.[[ArrayLength]].
const length: uintptr = witness.Get().length;
// 5. Let k be len - 1.
// 6. Repeat, while k ≥ 0
for (let k: uintptr = length; k-- > 0;) {
- witness.Recheck() otherwise goto Bailout(Convert<Number>(k));
// 6a. Let Pk be ! ToString(𝔽(k)).
- // there is no need to cast ToString to load elements.
+ // There is no need to cast ToString to load elements.
// 6b. Let kValue be ! Get(O, Pk).
- const value: JSAny = witness.Load(k);
+ // kValue must be undefined when the buffer was detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
// 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
// 𝔽(k), O »)).
@@ -96,13 +67,7 @@ TypedArrayPrototypeFindLastIndex(
const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- try {
- return FindLastIndexAllElements(uarray, predicate, thisArg)
- otherwise Bailout;
- } label Bailout(k: Number) deferred {
- return FindLastIndexAllElementsDetachedContinuation(
- uarray, predicate, thisArg, k);
- }
+ return FindLastIndexAllElements(uarray, predicate, thisArg);
} label NotCallable deferred {
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
} label NotTypedArray deferred {
diff --git a/deps/v8/src/builtins/typed-array-foreach.tq b/deps/v8/src/builtins/typed-array-foreach.tq
index d696d9c8dd..fa227bc75b 100644
--- a/deps/v8/src/builtins/typed-array-foreach.tq
+++ b/deps/v8/src/builtins/typed-array-foreach.tq
@@ -12,16 +12,33 @@ transitioning macro ForEachAllElements(implicit context: Context)(
thisArg: JSAny): Undefined {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
+
+ // 6c. Perform ? Call(callbackfn, thisArg, « kValue, 𝔽(k), O »).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
Call(
context, callbackfn, thisArg, value, Convert<Number>(k),
witness.GetStable());
+
+ // 6d. Set k to k + 1. (done by the loop).
}
+
+ // 7. Return undefined.
return Undefined;
}
diff --git a/deps/v8/src/builtins/typed-array-reduce.tq b/deps/v8/src/builtins/typed-array-reduce.tq
index a54ed1040e..0261599106 100644
--- a/deps/v8/src/builtins/typed-array-reduce.tq
+++ b/deps/v8/src/builtins/typed-array-reduce.tq
@@ -12,11 +12,17 @@ transitioning macro ReduceAllElements(implicit context: Context)(
initialValue: JSAny|TheHole): JSAny {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
let accumulator = initialValue;
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ let value: JSAny;
+ try {
+ witness.Recheck()
+ otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
typeswitch (accumulator) {
case (TheHole): {
accumulator = value;
diff --git a/deps/v8/src/builtins/typed-array-reduceright.tq b/deps/v8/src/builtins/typed-array-reduceright.tq
index 9ba2f70de4..5449c4f1fc 100644
--- a/deps/v8/src/builtins/typed-array-reduceright.tq
+++ b/deps/v8/src/builtins/typed-array-reduceright.tq
@@ -8,6 +8,7 @@ namespace typed_array {
const kBuiltinNameReduceRight: constexpr string =
'%TypedArray%.prototype.reduceRight';
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduceright
transitioning macro ReduceRightAllElements(implicit context: Context)(
array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
initialValue: JSAny|TheHole): JSAny {
@@ -15,9 +16,14 @@ transitioning macro ReduceRightAllElements(implicit context: Context)(
const length: uintptr = witness.Get().length;
let accumulator = initialValue;
for (let k: uintptr = length; k-- > 0;) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ let value: JSAny;
+ try {
+ witness.Recheck()
+ otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
typeswitch (accumulator) {
case (TheHole): {
accumulator = value;
diff --git a/deps/v8/src/builtins/typed-array-set.tq b/deps/v8/src/builtins/typed-array-set.tq
index f4d2a40f41..eeb521e3f6 100644
--- a/deps/v8/src/builtins/typed-array-set.tq
+++ b/deps/v8/src/builtins/typed-array-set.tq
@@ -115,18 +115,6 @@ TypedArrayPrototypeSetArray(implicit context: Context, receiver: JSAny)(
IfDetached {
// Steps 9-13 are not observable, do them later.
- // TODO(v8:8906): This ported behaviour is an observable spec violation and
- // the comment below seems to be outdated. Consider removing this code.
- try {
- const _arrayArgNum = Cast<Number>(arrayArg) otherwise NotNumber;
- // For number as a first argument, throw TypeError instead of silently
- // ignoring the call, so that users know they did something wrong.
- // (Consistent with Firefox and Blink/WebKit)
- ThrowTypeError(MessageTemplate::kInvalidArgument);
- } label NotNumber {
- // Proceed to step 14.
- }
-
// 14. Let src be ? ToObject(array).
const src: JSReceiver = ToObject_Inline(context, arrayArg);
diff --git a/deps/v8/src/builtins/typed-array-some.tq b/deps/v8/src/builtins/typed-array-some.tq
index ecdfae1e8a..9946907680 100644
--- a/deps/v8/src/builtins/typed-array-some.tq
+++ b/deps/v8/src/builtins/typed-array-some.tq
@@ -7,24 +7,45 @@
namespace typed_array {
const kBuiltinNameSome: constexpr string = '%TypedArray%.prototype.some';
+// https://tc39.es/ecma262/#sec-%typedarray%.prototype.some
transitioning macro SomeAllElements(implicit context: Context)(
array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
thisArg: JSAny): Boolean {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
+
+ // 6c. Let testResult be ! ToBoolean(? Call(callbackfn, thisArg, « kValue,
+ // 𝔽(k), O »)).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
const result = Call(
context, callbackfn, thisArg, value, Convert<Number>(k),
witness.GetStable());
+
+ // 6d. If testResult is true, return true.
if (ToBoolean(result)) {
return True;
}
+
+ // 6e. Set k to k + 1. (done by the loop).
}
+
+ // 7. Return false.
return False;
}
@@ -41,6 +62,7 @@ TypedArrayPrototypeSome(
const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
+
return SomeAllElements(uarray, callbackfn, thisArg);
} label NotCallable deferred {
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index 87bcb2fb59..582388b75d 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -161,10 +161,6 @@ macro GetTypedArrayAccessor(elementsKind: ElementsKind): TypedArrayAccessor {
unreachable;
}
-extern macro
-TypedArrayBuiltinsAssembler::AllocateJSTypedArrayExternalPointerEntry(
- JSTypedArray): void;
-
extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
JSTypedArray, ByteArray, uintptr): void;
extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq
index 7fc4a03e35..ec786311be 100644
--- a/deps/v8/src/builtins/wasm.tq
+++ b/deps/v8/src/builtins/wasm.tq
@@ -369,16 +369,6 @@ builtin WasmArrayCopyWithChecks(
SmiFromUint32(srcIndex), SmiFromUint32(length));
}
-// We put all uint32 parameters at the beginning so that they are assigned to
-// registers.
-builtin WasmArrayCopy(
- dstIndex: uint32, srcIndex: uint32, length: uint32, dstArray: WasmArray,
- srcArray: WasmArray): JSAny {
- tail runtime::WasmArrayCopy(
- LoadContextFromFrame(), dstArray, SmiFromUint32(dstIndex), srcArray,
- SmiFromUint32(srcIndex), SmiFromUint32(length));
-}
-
// Redeclaration with different typing (value is an Object, not JSAny).
extern transitioning runtime
CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, Object);
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 14186e3be6..f5ef0877bc 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -83,6 +83,35 @@ static void GenerateTailCallToReturnedCode(
namespace {
+enum class ArgumentsElementType {
+ kRaw, // Push arguments as they are.
+ kHandle // Dereference arguments before pushing.
+};
+
+void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
+ Register scratch,
+ ArgumentsElementType element_type) {
+ DCHECK(!AreAliased(array, argc, scratch, kScratchRegister));
+ Register counter = scratch;
+ Label loop, entry;
+ if (kJSArgcIncludesReceiver) {
+ __ leaq(counter, Operand(argc, -kJSArgcReceiverSlots));
+ } else {
+ __ movq(counter, argc);
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ Operand value(array, counter, times_system_pointer_size, 0);
+ if (element_type == ArgumentsElementType::kHandle) {
+ __ movq(kScratchRegister, value);
+ value = Operand(kScratchRegister, 0);
+ }
+ __ Push(value);
+ __ bind(&entry);
+ __ decq(counter);
+ __ j(greater_equal, &loop, Label::kNear);
+}
+
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax: number of arguments
@@ -112,7 +141,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
// Copy arguments to the expression stack.
- __ PushArray(rbx, rax, rcx);
+ // rbx: Pointer to start of arguments.
+ // rax: Number of arguments.
+ Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kRaw);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
@@ -129,8 +160,10 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- __ DropArguments(rbx, rcx, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ ret(0);
@@ -236,7 +269,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// InvokeFunction.
// Copy arguments to the expression stack.
- __ PushArray(rbx, rax, rcx);
+ // rbx: Pointer to start of arguments.
+ // rax: Number of arguments.
+ Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kRaw);
// Push implicit receiver.
__ Push(r8);
@@ -279,8 +314,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(rbx, rcx, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ ret(0);
// If the result is a smi, it is *not* an object in the ECMA sense.
@@ -607,18 +644,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&enough_stack_space);
- // Copy arguments to the stack in a loop.
+ // Copy arguments to the stack.
// Register rbx points to array of pointers to handle locations.
// Push the values of these handles.
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_system_pointer_size, 0));
- __ Push(Operand(kScratchRegister, 0)); // dereference handle
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop, Label::kNear);
+ // rbx: Pointer to start of arguments.
+ // rax: Number of arguments.
+ Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kHandle);
// Push the receiver.
__ Push(r9);
@@ -651,6 +682,21 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
+static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
+ Register code, Register scratch) {
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ movl(scratch, FieldOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ cmpl(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
+ __ Assert(equal, AbortReason::kExpectedBaselineData);
+}
+
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
+}
+
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
@@ -659,8 +705,21 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ LoadMap(scratch1, sfi_data);
- __ CmpInstanceType(scratch1, BASELINE_DATA_TYPE);
- __ j(equal, is_baseline);
+ __ CmpInstanceType(scratch1, CODET_TYPE);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ j(not_equal, &not_baseline);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ __ LoadCodeDataContainerCodeNonBuiltin(scratch1, sfi_data);
+ AssertCodeIsBaselineAllowClobber(masm, scratch1, scratch1);
+ } else {
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ }
+ __ j(equal, is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ j(equal, is_baseline);
+ }
__ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
__ j(not_equal, &done, Label::kNear);
@@ -736,7 +795,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movzxwq(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
-
+ if (kJSArgcIncludesReceiver) {
+ __ decq(rcx);
+ }
__ LoadTaggedPointerField(
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
@@ -771,7 +832,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ jmp(&ok);
__ bind(&is_baseline);
- __ CmpObjectType(rcx, BASELINE_DATA_TYPE, rcx);
+ __ CmpObjectType(rcx, CODET_TYPE, rcx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
__ bind(&ok);
@@ -862,7 +923,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Operand(rbp, StandardFrameConstants::kArgCOffset));
__ leaq(actual_params_size,
Operand(actual_params_size, times_system_pointer_size,
- kSystemPointerSize));
+ kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -1107,7 +1168,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// stack left to right.
//
// The live registers are:
-// o rax: actual argument count (not including the receiver)
+// o rax: actual argument count
// o rdi: the JS function object being called
// o rdx: the incoming new target or generator object
// o rsi: our context
@@ -1335,9 +1396,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ LoadTaggedPointerField(rcx,
- FieldOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(rcx, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(
masm, rcx, closure, kInterpreterBytecodeArrayRegister,
@@ -1374,7 +1433,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rbx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
@@ -1387,7 +1446,15 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ decl(rax);
}
- __ leal(rcx, Operand(rax, 1)); // Add one for receiver.
+ int argc_modification = kJSArgcIncludesReceiver ? 0 : 1;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ argc_modification -= 1;
+ }
+ if (argc_modification != 0) {
+ __ leal(rcx, Operand(rax, argc_modification));
+ } else {
+ __ movl(rcx, rax);
+ }
// Add a stack check before pushing arguments.
__ StackOverflowCheck(rcx, &stack_overflow);
@@ -1395,11 +1462,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // Don't copy receiver.
- __ decq(rcx);
- }
-
// rbx and rdx will be modified.
GenerateInterpreterPushArgs(masm, rcx, rbx, rdx);
@@ -1439,7 +1501,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- rdi : the constructor to call (can be any Object)
@@ -1462,7 +1524,12 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
// rcx and r8 will be modified.
- GenerateInterpreterPushArgs(masm, rax, rcx, r8);
+ Register argc_without_receiver = rax;
+ if (kJSArgcIncludesReceiver) {
+ argc_without_receiver = r11;
+ __ leaq(argc_without_receiver, Operand(rax, -kJSArgcReceiverSlots));
+ }
+ GenerateInterpreterPushArgs(masm, argc_without_receiver, rcx, r8);
// Push slot for the receiver to be constructed.
__ Push(Immediate(0));
@@ -1809,7 +1876,8 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// the LAZY deopt point. rax contains the arguments count, the return value
// from LAZY is always the last argument.
__ movq(Operand(rsp, rax, times_system_pointer_size,
- BuiltinContinuationFrameConstants::kFixedFrameSize),
+ BuiltinContinuationFrameConstants::kFixedFrameSize -
+ (kJSArgcIncludesReceiver ? kSystemPointerSize : 0)),
kScratchRegister);
}
__ movq(
@@ -1883,19 +1951,20 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadRoot(rdx, RootIndex::kUndefinedValue);
__ movq(rbx, rdx);
__ movq(rdi, args[0]);
- __ testq(rax, rax);
- __ j(zero, &no_this_arg, Label::kNear);
+ __ cmpq(rax, Immediate(JSParameterCount(0)));
+ __ j(equal, &no_this_arg, Label::kNear);
{
__ movq(rdx, args[1]);
- __ cmpq(rax, Immediate(1));
+ __ cmpq(rax, Immediate(JSParameterCount(1)));
__ j(equal, &no_arg_array, Label::kNear);
__ movq(rbx, args[2]);
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
- __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ rax, rdx, rcx, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1923,7 +1992,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// Function.prototype.apply() yet, we use a normal Call builtin here.
__ bind(&no_arguments);
{
- __ Move(rax, 0);
+ __ Move(rax, JSParameterCount(0));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
@@ -1937,7 +2006,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// ...
// rsp[8 * n] : Argument n-1
// rsp[8 * (n + 1)] : Argument n
- // rax contains the number of arguments, n, not counting the receiver.
+ // rax contains the number of arguments, n.
// 1. Get the callable to call (passed as receiver) from the stack.
{
@@ -1952,8 +2021,13 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Make sure we have at least one argument.
{
Label done;
- __ testq(rax, rax);
- __ j(not_zero, &done, Label::kNear);
+ if (kJSArgcIncludesReceiver) {
+ __ cmpq(rax, Immediate(JSParameterCount(0)));
+ __ j(greater, &done, Label::kNear);
+ } else {
+ __ testq(rax, rax);
+ __ j(not_zero, &done, Label::kNear);
+ }
__ PushRoot(RootIndex::kUndefinedValue);
__ incq(rax);
__ bind(&done);
@@ -1989,18 +2063,19 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(rdi, RootIndex::kUndefinedValue);
__ movq(rdx, rdi);
__ movq(rbx, rdi);
- __ cmpq(rax, Immediate(1));
+ __ cmpq(rax, Immediate(JSParameterCount(1)));
__ j(below, &done, Label::kNear);
__ movq(rdi, args[1]); // target
__ j(equal, &done, Label::kNear);
__ movq(rdx, args[2]); // thisArgument
- __ cmpq(rax, Immediate(3));
+ __ cmpq(rax, Immediate(JSParameterCount(3)));
__ j(below, &done, Label::kNear);
__ movq(rbx, args[3]); // argumentsList
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ rax, rdx, rcx, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2039,20 +2114,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadRoot(rdi, RootIndex::kUndefinedValue);
__ movq(rdx, rdi);
__ movq(rbx, rdi);
- __ cmpq(rax, Immediate(1));
+ __ cmpq(rax, Immediate(JSParameterCount(1)));
__ j(below, &done, Label::kNear);
__ movq(rdi, args[1]); // target
__ movq(rdx, rdi); // new.target defaults to target
__ j(equal, &done, Label::kNear);
__ movq(rbx, args[2]); // argumentsList
- __ cmpq(rax, Immediate(3));
+ __ cmpq(rax, Immediate(JSParameterCount(3)));
__ j(below, &done, Label::kNear);
__ movq(rdx, args[3]); // new.target
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(
rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx,
TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2076,13 +2152,68 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+namespace {
+
+// Allocate new stack space for |count| arguments and shift all existing
+// arguments already on the stack. |pointer_to_new_space_out| points to the
+// first free slot on the stack to copy additional arguments to and
+// |argc_in_out| is updated to include |count|.
+void Generate_AllocateSpaceAndShiftExistingArguments(
+ MacroAssembler* masm, Register count, Register argc_in_out,
+ Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
+ DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
+ scratch2, kScratchRegister));
+ // Use pointer_to_new_space_out as scratch until we set it to the correct
+ // value at the end.
+ Register old_rsp = pointer_to_new_space_out;
+ Register new_space = kScratchRegister;
+ __ movq(old_rsp, rsp);
+
+ __ leaq(new_space, Operand(count, times_system_pointer_size, 0));
+ __ AllocateStackSpace(new_space);
+
+ Register copy_count = argc_in_out;
+ if (!kJSArgcIncludesReceiver) {
+ // We have a spare register, so use it instead of clobbering argc.
+ // lea + add (to add the count to argc in the end) uses 1 less byte than
+ // inc + lea (with base, index and disp), at the cost of 1 extra register.
+ copy_count = scratch1;
+ __ leaq(copy_count, Operand(argc_in_out, 1)); // Include the receiver.
+ }
+ Register current = scratch2;
+ Register value = kScratchRegister;
+
+ Label loop, entry;
+ __ Move(current, 0);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(value, Operand(old_rsp, current, times_system_pointer_size, 0));
+ __ movq(Operand(rsp, current, times_system_pointer_size, 0), value);
+ __ incq(current);
+ __ bind(&entry);
+ __ cmpq(current, copy_count);
+ __ j(less_equal, &loop, Label::kNear);
+
+ // Point to the next free slot above the shifted arguments (copy_count + 1
+ // slot for the return address).
+ __ leaq(
+ pointer_to_new_space_out,
+ Operand(rsp, copy_count, times_system_pointer_size, kSystemPointerSize));
+ // We use addl instead of addq here because we can omit REX.W, saving 1 byte.
+ // We are especially constrained here because we are close to reaching the
+ // limit for a near jump to the stackoverflow label, so every byte counts.
+ __ addl(argc_in_out, count); // Update total number of arguments.
+}
+
+} // namespace
+
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- rdi : target
- // -- rax : number of parameters on the stack (not including the receiver)
+ // -- rax : number of parameters on the stack
// -- rbx : arguments list (a FixedArray)
// -- rcx : len (number of elements to push from args)
// -- rdx : new.target (for [[Construct]])
@@ -2114,28 +2245,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Push additional arguments onto the stack.
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register src = r8, dest = rsp, num = r9, current = r12;
- __ movq(src, rsp);
- __ leaq(kScratchRegister, Operand(rcx, times_system_pointer_size, 0));
- __ AllocateStackSpace(kScratchRegister);
- __ leaq(num, Operand(rax, 2)); // Number of words to copy.
- // +2 for receiver and return address.
- __ Move(current, 0);
- __ jmp(&check);
- __ bind(&copy);
- __ movq(kScratchRegister,
- Operand(src, current, times_system_pointer_size, 0));
- __ movq(Operand(dest, current, times_system_pointer_size, 0),
- kScratchRegister);
- __ incq(current);
- __ bind(&check);
- __ cmpq(current, num);
- __ j(less, &copy);
- __ leaq(r8, Operand(rsp, num, times_system_pointer_size, 0));
- }
-
+ // rcx: Number of arguments to make room for.
+ // rax: Number of arguments already on the stack.
+ // r8: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, rcx, rax, r8, r9, r12);
// Copy the additional arguments onto the stack.
{
Register value = r12;
@@ -2156,7 +2269,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ incl(current);
__ jmp(&loop);
__ bind(&done);
- __ addq(rax, current);
}
// Tail-call to the actual Call or Construct builtin.
@@ -2171,7 +2283,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (for [[Construct]] calls)
// -- rdi : the target to call (can be any Object)
// -- rcx : start index (to support rest parameters)
@@ -2197,12 +2309,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ movq(r8, Operand(rbp, StandardFrameConstants::kArgCOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ decq(r8);
+ }
__ subl(r8, rcx);
__ j(less_equal, &stack_done);
{
// ----------- S t a t e -------------
- // -- rax : the number of arguments already in the stack (not including the
- // receiver)
+ // -- rax : the number of arguments already in the stack
// -- rbp : point to the caller stack frame
// -- rcx : start index (to support rest parameters)
// -- rdx : the new target (for [[Construct]] calls)
@@ -2216,29 +2330,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Forward the arguments from the caller frame.
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register src = r9, dest = rsp, num = r12, current = r15;
- __ movq(src, rsp);
- __ leaq(kScratchRegister, Operand(r8, times_system_pointer_size, 0));
- __ AllocateStackSpace(kScratchRegister);
- __ leaq(num, Operand(rax, 2)); // Number of words to copy.
- // +2 for receiver and return address.
- __ Move(current, 0);
- __ jmp(&check);
- __ bind(&copy);
- __ movq(kScratchRegister,
- Operand(src, current, times_system_pointer_size, 0));
- __ movq(Operand(dest, current, times_system_pointer_size, 0),
- kScratchRegister);
- __ incq(current);
- __ bind(&check);
- __ cmpq(current, num);
- __ j(less, &copy);
- __ leaq(r9, Operand(rsp, num, times_system_pointer_size, 0));
- }
-
- __ addl(rax, r8); // Update total number of arguments.
+ // r8: Number of arguments to make room for.
+ // rax: Number of arguments already on the stack.
+ // r9: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, r8, rax, r9, r12,
+ r15);
// Point to the first argument to copy (skipping receiver).
__ leaq(rcx, Operand(rcx, times_system_pointer_size,
@@ -2274,7 +2370,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdi : the function to call (checked to be a JSFunction)
// -----------------------------------
@@ -2291,7 +2387,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ j(not_zero, &class_constructor);
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the shared function info.
// -- rdi : the function to call (checked to be a JSFunction)
// -----------------------------------
@@ -2308,7 +2404,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ j(not_zero, &done_convert);
{
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the shared function info.
// -- rdi : the function to call (checked to be a JSFunction)
// -- rsi : the function context.
@@ -2365,7 +2461,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bind(&done_convert);
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the shared function info.
// -- rdi : the function to call (checked to be a JSFunction)
// -- rsi : the function context.
@@ -2373,7 +2469,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movzxwq(
rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
-
__ InvokeFunctionCode(rdi, no_reg, rbx, rax, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
@@ -2389,7 +2484,7 @@ namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : new.target (only in case of [[Construct]])
// -- rdi : target (checked to be a JSBoundFunction)
// -----------------------------------
@@ -2403,7 +2498,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ j(zero, &no_bound_arguments);
{
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : new.target (only in case of [[Construct]])
// -- rdi : target (checked to be a JSBoundFunction)
// -- rcx : the [[BoundArguments]] (implemented as FixedArray)
@@ -2467,7 +2562,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdi : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(rdi);
@@ -2491,7 +2586,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdi : the target to call (can be any Object)
// -----------------------------------
StackArgumentsAccessor args(rax);
@@ -2540,7 +2635,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (checked to be a constructor)
// -- rdi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
@@ -2566,7 +2661,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (checked to be a constructor)
// -- rdi : the constructor to call (checked to be a JSBoundFunction)
// -----------------------------------
@@ -2595,7 +2690,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- rdi : the constructor to call (can be any Object)
@@ -2676,8 +2771,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
}
// Load deoptimization data from the code object.
- __ LoadTaggedPointerField(rbx,
- FieldOperand(rax, Code::kDeoptimizationDataOffset));
+ __ LoadTaggedPointerField(
+ rbx, FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
__ SmiUntagField(
@@ -2876,6 +2971,9 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// Put the in_parameter count on the stack, we only need it at the very end
// when we pop the parameters off the stack.
Register in_param_count = rax;
+ if (kJSArgcIncludesReceiver) {
+ __ decq(in_param_count);
+ }
__ movq(MemOperand(rbp, kInParamCountOffset), in_param_count);
in_param_count = no_reg;
@@ -3691,12 +3789,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
__ bind(&skip);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
ExternalReference c_entry_fp_address = ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, masm->isolate());
@@ -4384,7 +4476,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CmpObjectType(code_obj, BASELINE_DATA_TYPE, kScratchRegister);
+ __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
__ j(equal, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -4397,16 +4489,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
- __ CmpObjectType(code_obj, BASELINE_DATA_TYPE, kScratchRegister);
+ __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kExpectedBaselineData);
}
// Load baseline code from baseline data.
- __ LoadTaggedPointerField(
- code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
__ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
}
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, r11);
+ }
// Load the feedback vector.
Register feedback_vector = r11;
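
A minimal standalone C++ sketch (not part of the patch; names mirror the V8 constants, the chosen value of kJSArgcIncludesReceiver is an assumption) of the argument-count convention the x64 hunks above switch on: when the receiver slot is counted, rax already includes it, so literal comparisons go through JSParameterCount() and counts read from the frame drop one slot before being compared against receiver-less values.

#include <cassert>

// Standalone model of the two conventions; the value here is an assumption
// for illustration only.
constexpr bool kJSArgcIncludesReceiver = true;
constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

// Mirrors JSParameterCount(): turn a receiver-less parameter count into the
// value the builtins compare against the on-stack count in rax.
constexpr int JSParameterCount(int count_without_receiver) {
  return count_without_receiver + kJSArgcReceiverSlots;
}

int main() {
  // Reflect.construct(target, argumentsList, newTarget) has 3 JS parameters;
  // with the receiver slot included, the count checked above becomes 4.
  assert(JSParameterCount(3) == 3 + kJSArgcReceiverSlots);

  // An argc loaded from the caller frame must drop the receiver slot before
  // being compared with a receiver-less start index (cf. the added decq(r8)).
  int frame_argc = JSParameterCount(2);
  assert(frame_argc - kJSArgcReceiverSlots == 2);
  return 0;
}
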
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index 364d34fb09..6644faa7fb 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -8,9 +8,6 @@ jkummerow@chromium.org
leszeks@chromium.org
mslekova@chromium.org
mvstanton@chromium.org
-mythria@chromium.org
neis@chromium.org
nicohartmann@chromium.org
-rmcilroy@chromium.org
-solanes@chromium.org
zhin@chromium.org
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 970386be72..b49d9ed186 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -4786,7 +4786,7 @@ static Instr EncodeNeonPairwiseOp(NeonPairwiseOp op, NeonDataType dt,
void Assembler::vpadd(DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2) {
DCHECK(IsEnabled(NEON));
- // Dd = vpadd(Dn, Dm) SIMD integer pairwise ADD.
+ // Dd = vpadd(Dn, Dm) SIMD floating point pairwise ADD.
// Instruction details available in ARM DDI 0406C.b, A8-982.
int vd, d;
dst.split_code(&vd, &d);
@@ -5472,8 +5472,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
if (!entry.is_merged()) {
if (IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(entry.rmode())) {
int offset = pc_offset();
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, entry.value()));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, entry.value());
Handle<HeapObject> object(reinterpret_cast<Address*>(entry.value()));
emit(object->ptr());
DCHECK(EmbeddedObjectMatches(offset, object));
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 26d16406a6..43bbd86207 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -343,29 +343,32 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
DCHECK(root_array_available());
Label if_code_is_off_heap, out;
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
-
- DCHECK(!AreAliased(destination, scratch));
- DCHECK(!AreAliased(code_object, scratch));
-
- // Check whether the Code object is an off-heap trampoline. If so, call its
- // (off-heap) entry point directly without going through the (on-heap)
- // trampoline. Otherwise, just call the Code object as always.
- ldr(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
- tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
- b(ne, &if_code_is_off_heap);
-
- // Not an off-heap trampoline, the entry point is at
- // Code::raw_instruction_start().
- add(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
- jmp(&out);
-
- // An off-heap trampoline, the entry point is loaded from the builtin entry
- // table.
- bind(&if_code_is_off_heap);
- ldr(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
- lsl(destination, scratch, Operand(kSystemPointerSizeLog2));
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is an off-heap trampoline. If so, call
+ // its (off-heap) entry point directly without going through the (on-heap)
+ // trampoline. Otherwise, just call the Code object as always.
+ ldr(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+ tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+ b(ne, &if_code_is_off_heap);
+
+ // Not an off-heap trampoline, the entry point is at
+ // Code::raw_instruction_start().
+ add(destination, code_object,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ jmp(&out);
+
+ // An off-heap trampoline, the entry point is loaded from the builtin
+ // entry table.
+ bind(&if_code_is_off_heap);
+ ldr(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ lsl(destination, scratch, Operand(kSystemPointerSizeLog2));
+ }
add(destination, destination, kRootRegister);
ldr(destination,
MemOperand(destination, IsolateData::builtin_entry_table_offset()));
@@ -1669,7 +1672,11 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
sub(num, num, Operand(1), SetCC);
bind(&check);
- b(ge, &copy);
+ if (kJSArgcIncludesReceiver) {
+ b(gt, &copy);
+ } else {
+ b(ge, &copy);
+ }
}
// Fill remaining expected arguments with undefined values.
@@ -2660,10 +2667,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- mov(kSpeculationPoisonRegister, Operand(-1));
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index 41bc5ec544..bcecaec429 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -560,8 +560,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
diff --git a/deps/v8/src/codegen/arm/register-arm.h b/deps/v8/src/codegen/arm/register-arm.h
index 6608ad4ede..8cc838945d 100644
--- a/deps/v8/src/codegen/arm/register-arm.h
+++ b/deps/v8/src/codegen/arm/register-arm.h
@@ -336,7 +336,6 @@ constexpr Register kReturnRegister2 = r2;
constexpr Register kJSFunctionRegister = r1;
constexpr Register kContextRegister = r7;
constexpr Register kAllocateSizeRegister = r1;
-constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = r0;
constexpr Register kInterpreterBytecodeOffsetRegister = r5;
constexpr Register kInterpreterBytecodeArrayRegister = r6;
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index ef95b4e813..09065414cc 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -1516,9 +1516,7 @@ void MacroAssembler::AssertCodeT(Register object) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- CompareObjectType(
- object, temp, temp,
- V8_EXTERNAL_CODE_SPACE_BOOL ? CODE_DATA_CONTAINER_TYPE : CODE_TYPE);
+ CompareObjectType(object, temp, temp, CODET_TYPE);
Check(eq, AbortReason::kOperandIsNotACodeT);
}
@@ -1846,8 +1844,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
int64_t offset = CalculateTargetOffset(target, rmode, pc_);
if (RelocInfo::IsRuntimeEntry(rmode) && IsOnHeap()) {
- saved_offsets_for_runtime_entries_.push_back(
- std::make_pair(pc_offset(), offset));
+ saved_offsets_for_runtime_entries_.emplace_back(pc_offset(), offset);
offset = CalculateTargetOffset(target, RelocInfo::NONE, pc_);
}
JumpHelper(offset, rmode, cond);
@@ -1895,8 +1892,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
if (CanUseNearCallOrJump(rmode)) {
int64_t offset = CalculateTargetOffset(target, rmode, pc_);
if (IsOnHeap() && RelocInfo::IsRuntimeEntry(rmode)) {
- saved_offsets_for_runtime_entries_.push_back(
- std::make_pair(pc_offset(), offset));
+ saved_offsets_for_runtime_entries_.emplace_back(pc_offset(), offset);
offset = CalculateTargetOffset(target, RelocInfo::NONE, pc_);
}
DCHECK(IsNearCallOffset(offset));
@@ -2281,7 +2277,11 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
Register slots_to_copy = x4;
Register slots_to_claim = x5;
- Add(slots_to_copy, actual_argument_count, 1); // Copy with receiver.
+ if (kJSArgcIncludesReceiver) {
+ Mov(slots_to_copy, actual_argument_count);
+ } else {
+ Add(slots_to_copy, actual_argument_count, 1); // Copy with receiver.
+ }
Mov(slots_to_claim, extra_argument_count);
Tbz(extra_argument_count, 0, &even_extra_count);
@@ -2295,7 +2295,9 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
Register scratch = x11;
Add(slots_to_claim, extra_argument_count, 1);
And(scratch, actual_argument_count, 1);
- Eor(scratch, scratch, 1);
+ if (!kJSArgcIncludesReceiver) {
+ Eor(scratch, scratch, 1);
+ }
Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
}
@@ -2316,10 +2318,13 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
}
Bind(&skip_move);
- Register actual_argument_with_receiver = x4;
+ Register actual_argument_with_receiver = actual_argument_count;
Register pointer_next_value = x5;
- Add(actual_argument_with_receiver, actual_argument_count,
- 1); // {slots_to_copy} was scratched.
+ if (!kJSArgcIncludesReceiver) {
+ actual_argument_with_receiver = x4;
+ Add(actual_argument_with_receiver, actual_argument_count,
+ 1); // {slots_to_copy} was scratched.
+ }
// Copy extra arguments as undefined values.
{
@@ -2919,6 +2924,18 @@ void TurboAssembler::StoreTaggedField(const Register& value,
}
}
+void TurboAssembler::AtomicStoreTaggedField(const Register& value,
+ const Register& dst_base,
+ const Register& dst_index,
+ const Register& temp) {
+ Add(temp, dst_base, dst_index);
+ if (COMPRESS_POINTERS_BOOL) {
+ Stlr(value.W(), temp);
+ } else {
+ Stlr(value, temp);
+ }
+}
+
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
@@ -2950,6 +2967,40 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
Add(destination, kPtrComprCageBaseRegister, destination);
}
+void TurboAssembler::AtomicDecompressTaggedSigned(const Register& destination,
+ const Register& base,
+ const Register& index,
+ const Register& temp) {
+ ASM_CODE_COMMENT(this);
+ Add(temp, base, index);
+ Ldar(destination.W(), temp);
+ if (FLAG_debug_code) {
+ // Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
+ Add(destination, destination,
+ ((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32);
+ }
+}
+
+void TurboAssembler::AtomicDecompressTaggedPointer(const Register& destination,
+ const Register& base,
+ const Register& index,
+ const Register& temp) {
+ ASM_CODE_COMMENT(this);
+ Add(temp, base, index);
+ Ldar(destination.W(), temp);
+ Add(destination, kPtrComprCageBaseRegister, destination);
+}
+
+void TurboAssembler::AtomicDecompressAnyTagged(const Register& destination,
+ const Register& base,
+ const Register& index,
+ const Register& temp) {
+ ASM_CODE_COMMENT(this);
+ Add(temp, base, index);
+ Ldar(destination.W(), temp);
+ Add(destination, kPtrComprCageBaseRegister, destination);
+}
+
void TurboAssembler::CheckPageFlag(const Register& object, int mask,
Condition cc, Label* condition_met) {
ASM_CODE_COMMENT(this);
@@ -3540,10 +3591,6 @@ void TurboAssembler::ComputeCodeStartAddress(const Register& rd) {
adr(rd, -pc_offset());
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- Mov(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::RestoreFPAndLR() {
static_assert(StandardFrameConstants::kCallerFPOffset + kSystemPointerSize ==
StandardFrameConstants::kCallerPCOffset,
@@ -3575,6 +3622,16 @@ void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
}
#endif // V8_ENABLE_WEBASSEMBLY
+void TurboAssembler::PopcntHelper(Register dst, Register src) {
+ UseScratchRegisterScope temps(this);
+ VRegister scratch = temps.AcquireV(kFormat8B);
+ VRegister tmp = src.Is32Bits() ? scratch.S() : scratch.D();
+ Fmov(tmp, src);
+ Cnt(scratch, scratch);
+ Addv(scratch.B(), scratch);
+ Fmov(dst, tmp);
+}
+
void TurboAssembler::I64x2BitMask(Register dst, VRegister src) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope scope(this);
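
As a hedged, portable sketch of what the new PopcntHelper computes (move the GPR into a vector register, count bits per byte with Cnt, sum the bytes with Addv, move back): the result is simply the number of set bits in the source value.

#include <bitset>
#include <cassert>
#include <cstdint>

// Portable model of the Fmov/Cnt/Addv/Fmov sequence above: a population
// count of the 32- or 64-bit source register.
uint64_t PopcntModel(uint64_t src) { return std::bitset<64>(src).count(); }

int main() {
  assert(PopcntModel(0x0) == 0);
  assert(PopcntModel(0xFF) == 8);                   // one byte fully set
  assert(PopcntModel(0x8000000000000001ull) == 2);  // top and bottom bit
  return 0;
}
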
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 9128ba2c18..11a5e7eb9a 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -1192,6 +1192,29 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION
+ void St1(const VRegister& vt, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, dst);
+ }
+ void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, vt2, dst);
+ }
+ void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, vt2, vt3, dst);
+ }
+ void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, vt2, vt3, vt4, dst);
+ }
+ void St1(const VRegister& vt, int lane, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, lane, dst);
+ }
+
#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
V(rshrn, Rshrn) \
V(rshrn2, Rshrn2) \
@@ -1347,8 +1370,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(const Register& rd);
- void ResetSpeculationPoisonRegister();
-
// ---------------------------------------------------------------------------
// Pointer compression Support
@@ -1373,6 +1394,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand);
+ void AtomicStoreTaggedField(const Register& value, const Register& dst_base,
+ const Register& dst_index, const Register& temp);
+
void DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
@@ -1382,6 +1406,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand);
+ void AtomicDecompressTaggedSigned(const Register& destination,
+ const Register& base, const Register& index,
+ const Register& temp);
+ void AtomicDecompressTaggedPointer(const Register& destination,
+ const Register& base,
+ const Register& index,
+ const Register& temp);
+ void AtomicDecompressAnyTagged(const Register& destination,
+ const Register& base, const Register& index,
+ const Register& temp);
+
// Restore FP and LR from the values stored in the current frame. This will
// authenticate the LR when pointer authentication is enabled.
void RestoreFPAndLR();
@@ -1390,9 +1425,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void StoreReturnAddressInWasmExitFrame(Label* return_location);
#endif // V8_ENABLE_WEBASSEMBLY
- // Wasm SIMD helpers. These instructions don't have direct lowering to native
- // instructions. These helpers allow us to define the optimal code sequence,
- // and be used in both TurboFan and Liftoff.
+ // Wasm helpers. These instructions don't have direct lowering
+ // to native instructions. These helpers allow us to define the optimal code
+ // sequence, and be used in both TurboFan and Liftoff.
+ void PopcntHelper(Register dst, Register src);
void I64x2BitMask(Register dst, VRegister src);
void I64x2AllTrue(Register dst, VRegister src);
@@ -1645,28 +1681,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DCHECK(allow_macro_instructions());
ld4r(vt, vt2, vt3, vt4, src);
}
- void St1(const VRegister& vt, const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, dst);
- }
- void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, vt2, dst);
- }
- void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
- const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, vt2, vt3, dst);
- }
- void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
- const VRegister& vt4, const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, vt2, vt3, vt4, dst);
- }
- void St1(const VRegister& vt, int lane, const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, lane, dst);
- }
void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
DCHECK(allow_macro_instructions());
st2(vt, vt2, dst);
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index 5b234526a4..29a4212aac 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -547,8 +547,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
@@ -701,8 +699,6 @@ constexpr Register kJSFunctionRegister = x1;
constexpr Register kContextRegister = cp;
constexpr Register kAllocateSizeRegister = x1;
-constexpr Register kSpeculationPoisonRegister = x23;
-
constexpr Register kInterpreterAccumulatorRegister = x0;
constexpr Register kInterpreterBytecodeOffsetRegister = x19;
constexpr Register kInterpreterBytecodeArrayRegister = x20;
diff --git a/deps/v8/src/codegen/assembler-arch.h b/deps/v8/src/codegen/assembler-arch.h
index 3569644e52..2e1b56c467 100644
--- a/deps/v8/src/codegen/assembler-arch.h
+++ b/deps/v8/src/codegen/assembler-arch.h
@@ -21,6 +21,8 @@
#include "src/codegen/mips/assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/assembler-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/assembler-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/codegen/assembler-inl.h b/deps/v8/src/codegen/assembler-inl.h
index c04b6d9687..084f12cc7e 100644
--- a/deps/v8/src/codegen/assembler-inl.h
+++ b/deps/v8/src/codegen/assembler-inl.h
@@ -21,6 +21,8 @@
#include "src/codegen/mips/assembler-mips-inl.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/assembler-mips64-inl.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/assembler-loong64-inl.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/assembler-s390-inl.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index dfd406694a..cacbfbd679 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -248,6 +248,12 @@ AssemblerBase::AssemblerBase(const AssemblerOptions& options,
if (!buffer_) buffer_ = NewAssemblerBuffer(kDefaultBufferSize);
buffer_start_ = buffer_->start();
pc_ = buffer_start_;
+ if (IsOnHeap()) {
+ saved_handles_for_raw_object_ptr_.reserve(
+ kSavedHandleForRawObjectsInitialSize);
+ saved_offsets_for_runtime_entries_.reserve(
+ kSavedOffsetForRuntimeEntriesInitialSize);
+ }
}
AssemblerBase::~AssemblerBase() = default;
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 7373b5d48b..f1e5b85f1f 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -276,8 +276,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }
int pc_offset_for_safepoint() {
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
- // Mips needs it's own implementation to avoid trampoline's influence.
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_LOONG64)
+ // MIPS and LOONG need to use their own implementation to avoid the
+ // trampoline's influence.
UNREACHABLE();
#else
return pc_offset();
@@ -418,6 +420,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
CodeCommentsWriter code_comments_writer_;
// Relocation information when code allocated directly on heap.
+ // These constants correspond to the 99th percentile of a selected number of JS
+ // frameworks and benchmarks, including jquery, lodash, d3 and speedometer3.
+ const int kSavedHandleForRawObjectsInitialSize = 60;
+ const int kSavedOffsetForRuntimeEntriesInitialSize = 100;
std::vector<std::pair<uint32_t, Address>> saved_handles_for_raw_object_ptr_;
std::vector<std::pair<uint32_t, uint32_t>> saved_offsets_for_runtime_entries_;
diff --git a/deps/v8/src/codegen/atomic-memory-order.h b/deps/v8/src/codegen/atomic-memory-order.h
new file mode 100644
index 0000000000..fc56cd34e3
--- /dev/null
+++ b/deps/v8/src/codegen/atomic-memory-order.h
@@ -0,0 +1,35 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_
+#define V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_
+
+#include <ostream>
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+// Atomic memory orders supported by the compiler.
+enum class AtomicMemoryOrder : uint8_t { kAcqRel, kSeqCst };
+
+inline size_t hash_value(AtomicMemoryOrder order) {
+ return static_cast<uint8_t>(order);
+}
+
+inline std::ostream& operator<<(std::ostream& os, AtomicMemoryOrder order) {
+ switch (order) {
+ case AtomicMemoryOrder::kAcqRel:
+ return os << "kAcqRel";
+ case AtomicMemoryOrder::kSeqCst:
+ return os << "kSeqCst";
+ }
+ UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_
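
A standalone sketch of how the two orders in this new header line up with C++ memory orders and with the arm64 Stlr/Ldar helpers added above; the enum is re-declared locally so the example compiles on its own, and the mapping is illustrative rather than code from the patch.

#include <atomic>
#include <cstdint>
#include <iostream>

// Local mirror of AtomicMemoryOrder, re-declared for a self-contained example.
enum class AtomicMemoryOrder : uint8_t { kAcqRel, kSeqCst };

const char* ToString(AtomicMemoryOrder order) {
  return order == AtomicMemoryOrder::kAcqRel ? "kAcqRel" : "kSeqCst";
}

int main() {
  std::atomic<int> cell{0};

  // kAcqRel pairs release stores with acquire loads, matching the Stlr/Ldar
  // based AtomicStoreTaggedField/AtomicDecompress* helpers above.
  cell.store(1, std::memory_order_release);
  std::cout << ToString(AtomicMemoryOrder::kAcqRel) << ": "
            << cell.load(std::memory_order_acquire) << "\n";

  // kSeqCst corresponds to fully sequentially consistent accesses, as used
  // by the new TSANSeqCstStore* builtins.
  cell.store(2, std::memory_order_seq_cst);
  std::cout << ToString(AtomicMemoryOrder::kSeqCst) << ": " << cell.load()
            << "\n";
  return 0;
}
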
diff --git a/deps/v8/src/codegen/code-factory.cc b/deps/v8/src/codegen/code-factory.cc
index f3cb604478..dcf19a0ad5 100644
--- a/deps/v8/src/codegen/code-factory.cc
+++ b/deps/v8/src/codegen/code-factory.cc
@@ -378,24 +378,47 @@ Callable CodeFactory::ArraySingleArgumentConstructor(
#ifdef V8_IS_TSAN
// static
-Builtin CodeFactory::GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size) {
- if (size == kInt8Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? Builtin::kTSANRelaxedStore8IgnoreFP
- : Builtin::kTSANRelaxedStore8SaveFP;
- } else if (size == kInt16Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? Builtin::kTSANRelaxedStore16IgnoreFP
- : Builtin::kTSANRelaxedStore16SaveFP;
- } else if (size == kInt32Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? Builtin::kTSANRelaxedStore32IgnoreFP
- : Builtin::kTSANRelaxedStore32SaveFP;
+Builtin CodeFactory::GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+ std::memory_order order) {
+ if (order == std::memory_order_relaxed) {
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore8IgnoreFP
+ : Builtin::kTSANRelaxedStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore16IgnoreFP
+ : Builtin::kTSANRelaxedStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore32IgnoreFP
+ : Builtin::kTSANRelaxedStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore64IgnoreFP
+ : Builtin::kTSANRelaxedStore64SaveFP;
+ }
} else {
- CHECK_EQ(size, kInt64Size);
- return fp_mode == SaveFPRegsMode::kIgnore
- ? Builtin::kTSANRelaxedStore64IgnoreFP
- : Builtin::kTSANRelaxedStore64SaveFP;
+ DCHECK_EQ(order, std::memory_order_seq_cst);
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANSeqCstStore8IgnoreFP
+ : Builtin::kTSANSeqCstStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANSeqCstStore16IgnoreFP
+ : Builtin::kTSANSeqCstStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANSeqCstStore32IgnoreFP
+ : Builtin::kTSANSeqCstStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANSeqCstStore64IgnoreFP
+ : Builtin::kTSANSeqCstStore64SaveFP;
+ }
}
}
diff --git a/deps/v8/src/codegen/code-factory.h b/deps/v8/src/codegen/code-factory.h
index 4780678dad..05b27bef0e 100644
--- a/deps/v8/src/codegen/code-factory.h
+++ b/deps/v8/src/codegen/code-factory.h
@@ -90,7 +90,8 @@ class V8_EXPORT_PRIVATE CodeFactory final {
AllocationSiteOverrideMode override_mode);
#ifdef V8_IS_TSAN
- static Builtin GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size);
+ static Builtin GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+ std::memory_order order);
static Builtin GetTSANRelaxedLoadStub(SaveFPRegsMode fp_mode, int size);
#endif // V8_IS_TSAN
};
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index e25135dece..92686eff12 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -2193,9 +2193,10 @@ TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(
}
template <typename Array, typename TIndex, typename TValue>
-TNode<TValue> CodeStubAssembler::LoadArrayElement(
- TNode<Array> array, int array_header_size, TNode<TIndex> index_node,
- int additional_offset, LoadSensitivity needs_poisoning) {
+TNode<TValue> CodeStubAssembler::LoadArrayElement(TNode<Array> array,
+ int array_header_size,
+ TNode<TIndex> index_node,
+ int additional_offset) {
// TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants?
static_assert(std::is_same<TIndex, Smi>::value ||
std::is_same<TIndex, UintPtrT>::value ||
@@ -2210,23 +2211,17 @@ TNode<TValue> CodeStubAssembler::LoadArrayElement(
CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(array),
array_header_size));
constexpr MachineType machine_type = MachineTypeOf<TValue>::value;
- // TODO(gsps): Remove the Load case once LoadFromObject supports poisoning
- if (needs_poisoning == LoadSensitivity::kSafe) {
- return UncheckedCast<TValue>(LoadFromObject(machine_type, array, offset));
- } else {
- return UncheckedCast<TValue>(
- Load(machine_type, array, offset, needs_poisoning));
- }
+ return UncheckedCast<TValue>(LoadFromObject(machine_type, array, offset));
}
template V8_EXPORT_PRIVATE TNode<MaybeObject>
CodeStubAssembler::LoadArrayElement<TransitionArray, IntPtrT>(
- TNode<TransitionArray>, int, TNode<IntPtrT>, int, LoadSensitivity);
+ TNode<TransitionArray>, int, TNode<IntPtrT>, int);
template <typename TIndex>
TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
TNode<FixedArray> object, TNode<TIndex> index, int additional_offset,
- LoadSensitivity needs_poisoning, CheckBounds check_bounds) {
+ CheckBounds check_bounds) {
// TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants?
static_assert(std::is_same<TIndex, Smi>::value ||
std::is_same<TIndex, UintPtrT>::value ||
@@ -2238,25 +2233,22 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
if (NeedsBoundsCheck(check_bounds)) {
FixedArrayBoundsCheck(object, index, additional_offset);
}
- TNode<MaybeObject> element =
- LoadArrayElement(object, FixedArray::kHeaderSize, index,
- additional_offset, needs_poisoning);
+ TNode<MaybeObject> element = LoadArrayElement(object, FixedArray::kHeaderSize,
+ index, additional_offset);
return CAST(element);
}
template V8_EXPORT_PRIVATE TNode<Object>
CodeStubAssembler::LoadFixedArrayElement<Smi>(TNode<FixedArray>, TNode<Smi>,
- int, LoadSensitivity,
- CheckBounds);
+ int, CheckBounds);
template V8_EXPORT_PRIVATE TNode<Object>
CodeStubAssembler::LoadFixedArrayElement<UintPtrT>(TNode<FixedArray>,
TNode<UintPtrT>, int,
- LoadSensitivity,
CheckBounds);
template V8_EXPORT_PRIVATE TNode<Object>
CodeStubAssembler::LoadFixedArrayElement<IntPtrT>(TNode<FixedArray>,
TNode<IntPtrT>, int,
- LoadSensitivity, CheckBounds);
+ CheckBounds);
void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
TNode<Smi> index,
@@ -2291,9 +2283,8 @@ void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
TNode<Object> CodeStubAssembler::LoadPropertyArrayElement(
TNode<PropertyArray> object, TNode<IntPtrT> index) {
int additional_offset = 0;
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe;
return CAST(LoadArrayElement(object, PropertyArray::kHeaderSize, index,
- additional_offset, needs_poisoning));
+ additional_offset));
}
TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength(
@@ -2648,7 +2639,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
TNode<MaybeObject> CodeStubAssembler::LoadWeakFixedArrayElement(
TNode<WeakFixedArray> object, TNode<IntPtrT> index, int additional_offset) {
return LoadArrayElement(object, WeakFixedArray::kHeaderSize, index,
- additional_offset, LoadSensitivity::kSafe);
+ additional_offset);
}
TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement(
@@ -2934,11 +2925,18 @@ TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
Label check_for_interpreter_data(this, &var_result);
Label done(this, &var_result);
- GotoIfNot(HasInstanceType(var_result.value(), BASELINE_DATA_TYPE),
+ GotoIfNot(HasInstanceType(var_result.value(), CODET_TYPE),
&check_for_interpreter_data);
- TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
- var_result.value(), BaselineData::kDataOffset);
- var_result = baseline_data;
+ {
+ TNode<Code> code = FromCodeT(CAST(var_result.value()));
+ CSA_ASSERT(
+ this, Word32Equal(DecodeWord32<Code::KindField>(LoadObjectField<Int32T>(
+ code, Code::kFlagsOffset)),
+ Int32Constant(static_cast<int>(CodeKind::BASELINE))));
+ TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
+ code, Code::kDeoptimizationDataOrInterpreterDataOffset);
+ var_result = baseline_data;
+ }
Goto(&check_for_interpreter_data);
BIND(&check_for_interpreter_data);
@@ -3197,7 +3195,8 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
// Resize the capacity of the fixed array if it doesn't fit.
TNode<IntPtrT> first = arg_index->value();
- TNode<BInt> growth = IntPtrToBInt(IntPtrSub(args->GetLength(), first));
+ TNode<BInt> growth =
+ IntPtrToBInt(IntPtrSub(args->GetLengthWithoutReceiver(), first));
PossiblyGrowElementsCapacity(kind, array, var_length.value(), &var_elements,
growth, &pre_bailout);
@@ -4350,17 +4349,11 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
{
bool handle_old_space = !FLAG_young_generation_large_objects;
if (handle_old_space) {
- if (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly) {
- handle_old_space = false;
- CSA_ASSERT(this, Word32BinaryNot(FixedArraySizeDoesntFitInNewSpace(
- count, FixedArray::kHeaderSize)));
- } else {
- int constant_count;
- handle_old_space =
- !TryGetIntPtrOrSmiConstantValue(count, &constant_count) ||
- (constant_count >
- FixedArray::GetMaxLengthForNewSpaceAllocation(PACKED_ELEMENTS));
- }
+ int constant_count;
+ handle_old_space =
+ !TryGetIntPtrOrSmiConstantValue(count, &constant_count) ||
+ (constant_count >
+ FixedArray::GetMaxLengthForNewSpaceAllocation(PACKED_ELEMENTS));
}
Label old_space(this, Label::kDeferred);
@@ -4563,10 +4556,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
var_holes_converted != nullptr ? HoleConversionMode::kConvertToUndefined
: HoleConversionMode::kDontConvert;
TVARIABLE(FixedArrayBase, var_result);
- const AllocationFlags allocation_flags =
- (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly)
- ? CodeStubAssembler::kNone
- : CodeStubAssembler::kAllowLargeObjectAllocation;
+ auto allocation_flags = CodeStubAssembler::kAllowLargeObjectAllocation;
if (!first) {
first = IntPtrOrSmiConstant<TIndex>(0);
}
@@ -9535,7 +9525,8 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
GetCreationContext(CAST(holder), if_bailout);
var_value = CallBuiltin(
Builtin::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver,
- creation_context, getter, IntPtrConstant(0), receiver);
+ creation_context, getter, IntPtrConstant(i::JSParameterCount(0)),
+ receiver);
Goto(&done);
BIND(&runtime);
@@ -13806,9 +13797,8 @@ void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached(
TNode<RawPtrT> CodeStubAssembler::LoadJSArrayBufferBackingStorePtr(
TNode<JSArrayBuffer> array_buffer) {
- return LoadExternalPointerFromObject(array_buffer,
- JSArrayBuffer::kBackingStoreOffset,
- kArrayBufferBackingStoreTag);
+ return LoadObjectField<RawPtrT>(array_buffer,
+ JSArrayBuffer::kBackingStoreOffset);
}
TNode<JSArrayBuffer> CodeStubAssembler::LoadJSArrayBufferViewBuffer(
@@ -14093,7 +14083,8 @@ TNode<RawPtrT> CodeStubArguments::AtIndexPtr(TNode<IntPtrT> index) const {
}
TNode<Object> CodeStubArguments::AtIndex(TNode<IntPtrT> index) const {
- CSA_ASSERT(assembler_, assembler_->UintPtrOrSmiLessThan(index, GetLength()));
+ CSA_ASSERT(assembler_, assembler_->UintPtrOrSmiLessThan(
+ index, GetLengthWithoutReceiver()));
return assembler_->LoadFullTagged(AtIndexPtr(index));
}
@@ -14101,9 +14092,19 @@ TNode<Object> CodeStubArguments::AtIndex(int index) const {
return AtIndex(assembler_->IntPtrConstant(index));
}
+TNode<IntPtrT> CodeStubArguments::GetLengthWithoutReceiver() const {
+ TNode<IntPtrT> argc = argc_;
+ if (kJSArgcIncludesReceiver) {
+ argc = assembler_->IntPtrSub(argc, assembler_->IntPtrConstant(1));
+ }
+ return argc;
+}
+
TNode<IntPtrT> CodeStubArguments::GetLengthWithReceiver() const {
- TNode<IntPtrT> argc = GetLength();
- argc = assembler_->IntPtrAdd(argc, assembler_->IntPtrConstant(1));
+ TNode<IntPtrT> argc = argc_;
+ if (!kJSArgcIncludesReceiver) {
+ argc = assembler_->IntPtrAdd(argc, assembler_->IntPtrConstant(1));
+ }
return argc;
}
@@ -14113,8 +14114,9 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
CodeStubAssembler::Label argument_missing(assembler_),
argument_done(assembler_, &result);
- assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual(index, argc_),
- &argument_missing);
+ assembler_->GotoIf(
+ assembler_->UintPtrGreaterThanOrEqual(index, GetLengthWithoutReceiver()),
+ &argument_missing);
result = AtIndex(index);
assembler_->Goto(&argument_done);
@@ -14135,7 +14137,7 @@ void CodeStubArguments::ForEach(
first = assembler_->IntPtrConstant(0);
}
if (last == nullptr) {
- last = argc_;
+ last = GetLengthWithoutReceiver();
}
TNode<RawPtrT> start = AtIndexPtr(first);
TNode<RawPtrT> end = AtIndexPtr(last);
@@ -14150,8 +14152,7 @@ void CodeStubArguments::ForEach(
}
void CodeStubArguments::PopAndReturn(TNode<Object> value) {
- TNode<IntPtrT> pop_count =
- assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1));
+ TNode<IntPtrT> pop_count = GetLengthWithReceiver();
assembler_->PopAndReturn(pop_count, value);
}
@@ -14336,7 +14337,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
int32_t case_values[] = {
BYTECODE_ARRAY_TYPE,
- BASELINE_DATA_TYPE,
+ CODET_TYPE,
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
@@ -14380,7 +14381,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsBaselineData: Execute baseline code
BIND(&check_is_baseline_data);
{
- TNode<CodeT> baseline_code = LoadBaselineDataBaselineCode(CAST(sfi_data));
+ TNode<CodeT> baseline_code = CAST(sfi_data);
sfi_code = FromCodeT(baseline_code);
Goto(&done);
}
@@ -14563,7 +14564,15 @@ TNode<Object> CodeStubAssembler::GetArgumentValue(TorqueStructArguments args,
}
TorqueStructArguments CodeStubAssembler::GetFrameArguments(
- TNode<RawPtrT> frame, TNode<IntPtrT> argc) {
+ TNode<RawPtrT> frame, TNode<IntPtrT> argc,
+ FrameArgumentsArgcType argc_type) {
+ if (kJSArgcIncludesReceiver &&
+ argc_type == FrameArgumentsArgcType::kCountExcludesReceiver) {
+ argc = IntPtrAdd(argc, IntPtrConstant(kJSArgcReceiverSlots));
+ } else if (!kJSArgcIncludesReceiver &&
+ argc_type == FrameArgumentsArgcType::kCountIncludesReceiver) {
+ argc = IntPtrSub(argc, IntPtrConstant(1));
+ }
return CodeStubArguments(this, argc, frame).GetTorqueArguments();
}
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 008af6006f..f869ac687f 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -261,16 +261,17 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#define CSA_ASSERT_BRANCH(csa, gen, ...) \
(csa)->Assert(gen, #gen, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__))
-#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
- (csa)->Assert( \
- [&]() -> TNode<BoolT> { \
- const TNode<Word32T> argc = (csa)->UncheckedParameter<Word32T>( \
- Descriptor::kJSActualArgumentsCount); \
- return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
- }, \
- "argc " #op " " #expected, __FILE__, __LINE__, \
- {{SmiFromInt32((csa)->UncheckedParameter<Int32T>( \
- Descriptor::kJSActualArgumentsCount)), \
+#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
+ (csa)->Assert( \
+ [&]() -> TNode<BoolT> { \
+ const TNode<Word32T> argc = (csa)->UncheckedParameter<Word32T>( \
+ Descriptor::kJSActualArgumentsCount); \
+ return (csa)->Op(argc, \
+ (csa)->Int32Constant(i::JSParameterCount(expected))); \
+ }, \
+ "argc " #op " " #expected, __FILE__, __LINE__, \
+ {{SmiFromInt32((csa)->UncheckedParameter<Int32T>( \
+ Descriptor::kJSActualArgumentsCount)), \
"argc"}})
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
@@ -1107,15 +1108,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<RawPtrT> LoadJSTypedArrayExternalPointerPtr(
TNode<JSTypedArray> holder) {
- return LoadExternalPointerFromObject(holder,
- JSTypedArray::kExternalPointerOffset,
- kTypedArrayExternalPointerTag);
+ return LoadObjectField<RawPtrT>(holder,
+ JSTypedArray::kExternalPointerOffset);
}
void StoreJSTypedArrayExternalPointerPtr(TNode<JSTypedArray> holder,
TNode<RawPtrT> value) {
- StoreExternalPointerToObject(holder, JSTypedArray::kExternalPointerOffset,
- value, kTypedArrayExternalPointerTag);
+ StoreObjectFieldNoWriteBarrier<RawPtrT>(
+ holder, JSTypedArray::kExternalPointerOffset, value);
}
// Load value from current parent frame by given offset in bytes.
@@ -1448,40 +1448,35 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Array is any array-like type that has a fixed header followed by
// tagged elements.
template <typename Array, typename TIndex, typename TValue = MaybeObject>
- TNode<TValue> LoadArrayElement(
- TNode<Array> array, int array_header_size, TNode<TIndex> index,
- int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ TNode<TValue> LoadArrayElement(TNode<Array> array, int array_header_size,
+ TNode<TIndex> index,
+ int additional_offset = 0);
template <typename TIndex>
TNode<Object> LoadFixedArrayElement(
TNode<FixedArray> object, TNode<TIndex> index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe,
CheckBounds check_bounds = CheckBounds::kAlways);
// This doesn't emit a bounds-check. As part of the security-performance
// tradeoff, only use it if it is performance critical.
- TNode<Object> UnsafeLoadFixedArrayElement(
- TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ TNode<Object> UnsafeLoadFixedArrayElement(TNode<FixedArray> object,
+ TNode<IntPtrT> index,
+ int additional_offset = 0) {
return LoadFixedArrayElement(object, index, additional_offset,
- needs_poisoning, CheckBounds::kDebugOnly);
+ CheckBounds::kDebugOnly);
}
- TNode<Object> LoadFixedArrayElement(
- TNode<FixedArray> object, int index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ TNode<Object> LoadFixedArrayElement(TNode<FixedArray> object, int index,
+ int additional_offset = 0) {
return LoadFixedArrayElement(object, IntPtrConstant(index),
- additional_offset, needs_poisoning);
+ additional_offset);
}
// This doesn't emit a bounds-check. As part of the security-performance
// tradeoff, only use it if it is performance critical.
- TNode<Object> UnsafeLoadFixedArrayElement(
- TNode<FixedArray> object, int index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ TNode<Object> UnsafeLoadFixedArrayElement(TNode<FixedArray> object, int index,
+ int additional_offset = 0) {
return LoadFixedArrayElement(object, IntPtrConstant(index),
- additional_offset, needs_poisoning,
- CheckBounds::kDebugOnly);
+ additional_offset, CheckBounds::kDebugOnly);
}
TNode<Object> LoadPropertyArrayElement(TNode<PropertyArray> object,
@@ -2138,7 +2133,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
kFixedArrays = 1,
kFixedDoubleArrays = 2,
kDontCopyCOW = 4,
- kNewSpaceAllocationOnly = 8,
kAllFixedArrays = kFixedArrays | kFixedDoubleArrays,
kAllFixedArraysDontCopyCOW = kAllFixedArrays | kDontCopyCOW
};
@@ -3647,8 +3641,28 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> GetArgumentValue(TorqueStructArguments args,
TNode<IntPtrT> index);
- TorqueStructArguments GetFrameArguments(TNode<RawPtrT> frame,
- TNode<IntPtrT> argc);
+ enum class FrameArgumentsArgcType {
+ kCountIncludesReceiver,
+ kCountExcludesReceiver
+ };
+
+ TorqueStructArguments GetFrameArguments(
+ TNode<RawPtrT> frame, TNode<IntPtrT> argc,
+ FrameArgumentsArgcType argc_type =
+ FrameArgumentsArgcType::kCountExcludesReceiver);
+
+ inline TNode<Int32T> JSParameterCount(TNode<Int32T> argc_without_receiver) {
+ return kJSArgcIncludesReceiver
+ ? Int32Add(argc_without_receiver,
+ Int32Constant(kJSArgcReceiverSlots))
+ : argc_without_receiver;
+ }
+ inline TNode<Word32T> JSParameterCount(TNode<Word32T> argc_without_receiver) {
+ return kJSArgcIncludesReceiver
+ ? Int32Add(argc_without_receiver,
+ Int32Constant(kJSArgcReceiverSlots))
+ : argc_without_receiver;
+ }
// Support for printf-style debugging
void Print(const char* s);
@@ -4086,7 +4100,7 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
CodeStubArguments(CodeStubAssembler* assembler,
TorqueStructArguments torque_arguments)
: assembler_(assembler),
- argc_(torque_arguments.length),
+ argc_(torque_arguments.actual_count),
base_(torque_arguments.base),
fp_(torque_arguments.frame) {}
@@ -4104,12 +4118,12 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
TNode<Object> AtIndex(int index) const;
// Return the number of arguments (excluding the receiver).
- TNode<IntPtrT> GetLength() const { return argc_; }
+ TNode<IntPtrT> GetLengthWithoutReceiver() const;
// Return the number of arguments (including the receiver).
TNode<IntPtrT> GetLengthWithReceiver() const;
TorqueStructArguments GetTorqueArguments() const {
- return TorqueStructArguments{fp_, base_, argc_};
+ return TorqueStructArguments{fp_, base_, GetLengthWithoutReceiver(), argc_};
}
TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index,
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index 4fd70a8d9e..9fab1cd40f 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -1064,8 +1064,8 @@ Handle<Code> ContinuationForConcurrentOptimization(
function->set_code(function->feedback_vector().optimized_code());
}
return handle(function->code(), isolate);
- } else if (function->shared().HasBaselineData()) {
- Code baseline_code = function->shared().baseline_data().baseline_code();
+ } else if (function->shared().HasBaselineCode()) {
+ Code baseline_code = function->shared().baseline_code(kAcquireLoad);
function->set_code(baseline_code);
return handle(baseline_code, isolate);
}
@@ -1179,9 +1179,13 @@ void SpawnDuplicateConcurrentJobForStressTesting(Isolate* isolate,
isolate->concurrent_recompilation_enabled() &&
mode == ConcurrencyMode::kNotConcurrent &&
isolate->node_observer() == nullptr);
+ GetOptimizedCodeResultHandling result_handling =
+ FLAG_stress_concurrent_inlining_attach_code
+ ? GetOptimizedCodeResultHandling::kDefault
+ : GetOptimizedCodeResultHandling::kDiscardForTesting;
USE(GetOptimizedCode(isolate, function, ConcurrencyMode::kConcurrent,
code_kind, BytecodeOffset::None(), nullptr,
- GetOptimizedCodeResultHandling::kDiscardForTesting));
+ result_handling));
}
bool FailAndClearPendingException(Isolate* isolate) {
@@ -1308,6 +1312,7 @@ void FinalizeUnoptimizedScriptCompilation(
void CompileAllWithBaseline(Isolate* isolate,
const FinalizeUnoptimizedCompilationDataList&
finalize_unoptimized_compilation_data_list) {
+ CodePageCollectionMemoryModificationScope code_allocation(isolate->heap());
for (const auto& finalize_data : finalize_unoptimized_compilation_data_list) {
Handle<SharedFunctionInfo> shared_info = finalize_data.function_handle();
IsCompiledScope is_compiled_scope(*shared_info, isolate);
@@ -1975,7 +1980,7 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
DCHECK(is_compiled_scope->is_compiled());
// Early return for already baseline-compiled functions.
- if (shared->HasBaselineData()) return true;
+ if (shared->HasBaselineCode()) return true;
// Check if we actually can compile with baseline.
if (!CanCompileWithBaseline(isolate, *shared)) return false;
@@ -1998,12 +2003,8 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
// report these somehow, or silently ignore them?
return false;
}
+ shared->set_baseline_code(*code, kReleaseStore);
- Handle<HeapObject> function_data =
- handle(HeapObject::cast(shared->function_data(kAcquireLoad)), isolate);
- Handle<BaselineData> baseline_data =
- isolate->factory()->NewBaselineData(code, function_data);
- shared->set_baseline_data(*baseline_data);
if (V8_LIKELY(FLAG_use_osr)) {
// Arm back edges for OSR
shared->GetBytecodeArray(isolate).set_osr_loop_nesting_level(
@@ -2035,7 +2036,7 @@ bool Compiler::CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
// Baseline code needs a feedback vector.
JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
- Code baseline_code = shared->baseline_data().baseline_code(isolate);
+ Code baseline_code = shared->baseline_code(kAcquireLoad);
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
function->set_code(baseline_code);
@@ -2210,7 +2211,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
// position, but store it as negative value for lazy translation.
StackTraceFrameIterator it(isolate);
if (!it.done() && it.is_javascript()) {
- FrameSummary summary = FrameSummary::GetTop(it.javascript_frame());
+ FrameSummary summary = it.GetTopValidFrame();
script->set_eval_from_shared(
summary.AsJavaScript().function()->shared());
script->set_origin_options(OriginOptionsForEval(*summary.script()));
@@ -2830,13 +2831,10 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
return maybe_result;
}
-} // namespace
-
-// static
-MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
+MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScriptImpl(
Isolate* isolate, Handle<String> source,
const ScriptDetails& script_details, v8::Extension* extension,
- AlignedCachedData* cached_data,
+ AlignedCachedData* cached_data, BackgroundDeserializeTask* deserialize_task,
ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
@@ -2844,9 +2842,12 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
if (compile_options == ScriptCompiler::kNoCompileOptions ||
compile_options == ScriptCompiler::kEagerCompile) {
DCHECK_NULL(cached_data);
+ DCHECK_NULL(deserialize_task);
} else {
- DCHECK(compile_options == ScriptCompiler::kConsumeCodeCache);
- DCHECK(cached_data);
+ DCHECK_EQ(compile_options, ScriptCompiler::kConsumeCodeCache);
+ // Have to have exactly one of cached_data or deserialize_task.
+ DCHECK(cached_data || deserialize_task);
+ DCHECK(!(cached_data && deserialize_task));
DCHECK_NULL(extension);
}
int source_length = source->length();
@@ -2882,17 +2883,26 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileDeserialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
- Handle<SharedFunctionInfo> inner_result;
- if (CodeSerializer::Deserialize(isolate, cached_data, source,
- script_details.origin_options)
- .ToHandle(&inner_result) &&
- inner_result->is_compiled()) {
- // Promote to per-isolate compilation cache.
- is_compiled_scope = inner_result->is_compiled_scope(isolate);
- DCHECK(is_compiled_scope.is_compiled());
- compilation_cache->PutScript(source, language_mode, inner_result);
- maybe_result = inner_result;
+ if (deserialize_task) {
+ // If there's a cache consume task, finish it.
+ maybe_result = deserialize_task->Finish(isolate, source,
+ script_details.origin_options);
} else {
+ maybe_result = CodeSerializer::Deserialize(
+ isolate, cached_data, source, script_details.origin_options);
+ }
+
+ bool consuming_code_cache_succeeded = false;
+ Handle<SharedFunctionInfo> result;
+ if (maybe_result.ToHandle(&result)) {
+ is_compiled_scope = result->is_compiled_scope(isolate);
+ if (is_compiled_scope.is_compiled()) {
+ consuming_code_cache_succeeded = true;
+ // Promote to per-isolate compilation cache.
+ compilation_cache->PutScript(source, language_mode, result);
+ }
+ }
+ if (!consuming_code_cache_succeeded) {
// Deserializer failed. Fall through to compile.
compile_timer.set_consuming_code_cache_failed();
}
@@ -2937,6 +2947,51 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
return maybe_result;
}
+} // namespace
+
+MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
+ return GetSharedFunctionInfoForScriptImpl(
+ isolate, source, script_details, nullptr, nullptr, nullptr,
+ compile_options, no_cache_reason, natives);
+}
+
+MaybeHandle<SharedFunctionInfo>
+Compiler::GetSharedFunctionInfoForScriptWithExtension(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details, v8::Extension* extension,
+ ScriptCompiler::CompileOptions compile_options, NativesFlag natives) {
+ return GetSharedFunctionInfoForScriptImpl(
+ isolate, source, script_details, extension, nullptr, nullptr,
+ compile_options, ScriptCompiler::kNoCacheBecauseV8Extension, natives);
+}
+
+MaybeHandle<SharedFunctionInfo>
+Compiler::GetSharedFunctionInfoForScriptWithCachedData(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details, AlignedCachedData* cached_data,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
+ return GetSharedFunctionInfoForScriptImpl(
+ isolate, source, script_details, nullptr, cached_data, nullptr,
+ compile_options, no_cache_reason, natives);
+}
+
+MaybeHandle<SharedFunctionInfo>
+Compiler::GetSharedFunctionInfoForScriptWithDeserializeTask(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details,
+ BackgroundDeserializeTask* deserialize_task,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
+ return GetSharedFunctionInfoForScriptImpl(
+ isolate, source, script_details, nullptr, nullptr, deserialize_task,
+ compile_options, no_cache_reason, natives);
+}
+
// static
MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
Handle<String> source, Handle<FixedArray> arguments,
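
Editor's note: the compiler.cc hunks above drop the BaselineData wrapper and publish baseline code through set_baseline_code(..., kReleaseStore), with readers using baseline_code(kAcquireLoad). As a hedged analogy only (placeholder names, not V8's SharedFunctionInfo machinery), the release/acquire pairing behaves like this std::atomic publication sketch.

    #include <atomic>

    struct Code { int kind; };  // placeholder, not v8::internal::Code

    std::atomic<Code*> installed_code{nullptr};

    void Publish(Code* code) {
      // Release: everything written to *code before this store becomes visible
      // to any thread that later observes the pointer with an acquire load.
      installed_code.store(code, std::memory_order_release);
    }

    Code* TryGet() {
      // Acquire: pairs with the release store above; a non-null result implies
      // the pointed-to object is fully initialized from this thread's view.
      return installed_code.load(std::memory_order_acquire);
    }
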
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index 0d1582d872..97bd6bd027 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -161,8 +161,39 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Create a shared function info object for a String source.
static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason,
+ NativesFlag is_natives_code);
+
+ // Create a shared function info object for a String source.
+ static MaybeHandle<SharedFunctionInfo>
+ GetSharedFunctionInfoForScriptWithExtension(
+ Isolate* isolate, Handle<String> source,
const ScriptDetails& script_details, v8::Extension* extension,
- AlignedCachedData* cached_data,
+ ScriptCompiler::CompileOptions compile_options,
+ NativesFlag is_natives_code);
+
+ // Create a shared function info object for a String source and serialized
+ // cached data. The cached data may be rejected, in which case this function
+ // will set cached_data->rejected() to true.
+ static MaybeHandle<SharedFunctionInfo>
+ GetSharedFunctionInfoForScriptWithCachedData(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details, AlignedCachedData* cached_data,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason,
+ NativesFlag is_natives_code);
+
+ // Create a shared function info object for a String source and a task that
+ // has deserialized cached data on a background thread. The cached data from
+ // the task may be rejected, in which case this function will set
+ // deserialize_task->rejected() to true.
+ static MaybeHandle<SharedFunctionInfo>
+ GetSharedFunctionInfoForScriptWithDeserializeTask(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details,
+ BackgroundDeserializeTask* deserialize_task,
ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason,
NativesFlag is_natives_code);
@@ -571,6 +602,8 @@ class V8_EXPORT_PRIVATE BackgroundDeserializeTask {
Handle<String> source,
ScriptOriginOptions origin_options);
+ bool rejected() const { return cached_data_.rejected(); }
+
private:
Isolate* isolate_for_local_isolate_;
AlignedCachedData cached_data_;
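
Editor's note: the header above splits the old single GetSharedFunctionInfoForScript entry point into variants for plain sources, extensions, serialized cached data, and background deserialize tasks, and exposes BackgroundDeserializeTask::rejected(). The sketch below shows how a hypothetical caller might choose between them; only the Compiler and BackgroundDeserializeTask names come from the declarations above, while the caller itself (including the NOT_NATIVES_CODE flag value) is an assumption for illustration.

    // Hypothetical caller sketch, not V8 code.
    MaybeHandle<SharedFunctionInfo> CompileScriptSketch(
        Isolate* isolate, Handle<String> source, const ScriptDetails& details,
        AlignedCachedData* cached_data, BackgroundDeserializeTask* task,
        ScriptCompiler::CompileOptions options,
        ScriptCompiler::NoCacheReason no_cache_reason) {
      if (task != nullptr) {
        // Background deserialization already ran; finish and validate it here.
        MaybeHandle<SharedFunctionInfo> result =
            Compiler::GetSharedFunctionInfoForScriptWithDeserializeTask(
                isolate, source, details, task, options, no_cache_reason,
                NOT_NATIVES_CODE);
        if (task->rejected()) {
          // The cached data did not match; the result came from a fresh compile.
        }
        return result;
      }
      if (cached_data != nullptr) {
        return Compiler::GetSharedFunctionInfoForScriptWithCachedData(
            isolate, source, details, cached_data, options, no_cache_reason,
            NOT_NATIVES_CODE);
      }
      return Compiler::GetSharedFunctionInfoForScript(
          isolate, source, details, options, no_cache_reason, NOT_NATIVES_CODE);
    }
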
diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc
index 9af91d7a15..510f59185c 100644
--- a/deps/v8/src/codegen/constant-pool.cc
+++ b/deps/v8/src/codegen/constant-pool.cc
@@ -356,8 +356,7 @@ void ConstantPool::Emit(const ConstantPoolKey& key) {
if (assm_->IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(key.rmode())) {
int offset = assm_->pc_offset();
Assembler::EmbeddedObjectIndex index = key.value64();
- assm_->saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, index));
+ assm_->saved_handles_for_raw_object_ptr_.emplace_back(offset, index);
Handle<Object> object = assm_->GetEmbeddedObject(index);
assm_->dq(object->ptr());
DCHECK(assm_->EmbeddedObjectMatches(offset, object, index));
diff --git a/deps/v8/src/codegen/constants-arch.h b/deps/v8/src/codegen/constants-arch.h
index 2417be5d4d..7eb32bafde 100644
--- a/deps/v8/src/codegen/constants-arch.h
+++ b/deps/v8/src/codegen/constants-arch.h
@@ -15,6 +15,8 @@
#include "src/codegen/mips/constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/constants-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/constants-loong64.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/constants-ppc.h"
#elif V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index ab6608679f..3cdae6d4c8 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -51,6 +51,9 @@ enum CpuFeature {
MIPSr6,
MIPS_SIMD, // MSA instructions
+#elif V8_TARGET_ARCH_LOONG64
+ FPU,
+
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
PPC_6_PLUS,
PPC_7_PLUS,
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index e1d8c5d96e..0c04e84a68 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -145,6 +145,19 @@ constexpr struct alignas(16) {
} wasm_uint32_max_as_double = {uint64_t{0x41efffffffe00000},
uint64_t{0x41efffffffe00000}};
+// This is 2147483648.0, which is 1 more than INT32_MAX.
+constexpr struct alignas(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+} wasm_int32_overflow_as_float = {
+ uint32_t{0x4f00'0000},
+ uint32_t{0x4f00'0000},
+ uint32_t{0x4f00'0000},
+ uint32_t{0x4f00'0000},
+};
+
// Implementation of ExternalReference
static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
@@ -400,6 +413,7 @@ IF_WASM(FUNCTION_REFERENCE, wasm_memory_fill, wasm::memory_fill_wrapper)
IF_WASM(FUNCTION_REFERENCE, wasm_float64_pow, wasm::float64_pow_wrapper)
IF_WASM(FUNCTION_REFERENCE, wasm_call_trap_callback_for_testing,
wasm::call_trap_callback_for_testing)
+IF_WASM(FUNCTION_REFERENCE, wasm_array_copy, wasm::array_copy_wrapper)
static void f64_acos_wrapper(Address data) {
double input = ReadUnalignedValue<double>(data);
@@ -618,6 +632,11 @@ ExternalReference ExternalReference::address_of_wasm_uint32_max_as_double() {
reinterpret_cast<Address>(&wasm_uint32_max_as_double));
}
+ExternalReference ExternalReference::address_of_wasm_int32_overflow_as_float() {
+ return ExternalReference(
+ reinterpret_cast<Address>(&wasm_int32_overflow_as_float));
+}
+
ExternalReference
ExternalReference::address_of_enable_experimental_regexp_engine() {
return ExternalReference(&FLAG_enable_experimental_regexp_engine);
@@ -688,6 +707,8 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
#elif V8_TARGET_ARCH_MIPS64
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
+#elif V8_TARGET_ARCH_LOONG64
+#define re_stack_check_func RegExpMacroAssemblerLOONG64::CheckStackGuardState
#elif V8_TARGET_ARCH_S390
#define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState
#elif V8_TARGET_ARCH_RISCV64
@@ -1180,7 +1201,7 @@ namespace {
// address, with the same value. This is done in order for TSAN to see these
// stores from generated code.
// Note that {value} is an int64_t irrespective of the store size. This is on
-// purpose to keep the function signatures the same accross stores. The
+// purpose to keep the function signatures the same across stores. The
// static_cast inside the method will ignore the bits which will not be stored.
void tsan_relaxed_store_8_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
@@ -1218,6 +1239,44 @@ void tsan_relaxed_store_64_bits(Address addr, int64_t value) {
#endif // V8_TARGET_ARCH_X64
}
+// Same as above, for sequentially consistent stores.
+void tsan_seq_cst_store_8_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::SeqCst_Store(reinterpret_cast<base::Atomic8*>(addr),
+ static_cast<base::Atomic8>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+void tsan_seq_cst_store_16_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::SeqCst_Store(reinterpret_cast<base::Atomic16*>(addr),
+ static_cast<base::Atomic16>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+void tsan_seq_cst_store_32_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::SeqCst_Store(reinterpret_cast<base::Atomic32*>(addr),
+ static_cast<base::Atomic32>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+void tsan_seq_cst_store_64_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::SeqCst_Store(reinterpret_cast<base::Atomic64*>(addr),
+ static_cast<base::Atomic64>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+// Same as above, for relaxed loads.
base::Atomic32 tsan_relaxed_load_32_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
return base::Relaxed_Load(reinterpret_cast<base::Atomic32*>(addr));
@@ -1245,6 +1304,14 @@ IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_32_bits,
tsan_relaxed_store_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_64_bits,
tsan_relaxed_store_64_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_8_bits,
+ tsan_seq_cst_store_8_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_16_bits,
+ tsan_seq_cst_store_16_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_32_bits,
+ tsan_seq_cst_store_32_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_64_bits,
+ tsan_seq_cst_store_64_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_load_function_32_bits,
tsan_relaxed_load_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_load_function_64_bits,
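
Editor's note: the new wasm_int32_overflow_as_float table above repeats 0x4f000000 in all four lanes, and the comment identifies it as 2147483648.0, i.e. INT32_MAX + 1. A stand-alone check of that bit pattern, using memcpy rather than any V8 helper:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const uint32_t bits = 0x4F000000u;  // sign 0, exponent 158, mantissa 0
      float value;
      static_assert(sizeof(value) == sizeof(bits), "float must be 32-bit");
      std::memcpy(&value, &bits, sizeof(value));
      assert(value == 2147483648.0f);  // 2^31, exactly representable
      return 0;
    }
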
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index cbc3463841..ca62ff9d7a 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -111,13 +111,6 @@ class StatsCounter;
V(address_of_runtime_stats_flag, "TracingFlags::runtime_stats") \
V(address_of_the_hole_nan, "the_hole_nan") \
V(address_of_uint32_bias, "uint32_bias") \
- V(address_of_wasm_i8x16_swizzle_mask, "wasm_i8x16_swizzle_mask") \
- V(address_of_wasm_i8x16_popcnt_mask, "wasm_i8x16_popcnt_mask") \
- V(address_of_wasm_i8x16_splat_0x01, "wasm_i8x16_splat_0x01") \
- V(address_of_wasm_i8x16_splat_0x0f, "wasm_i8x16_splat_0x0f") \
- V(address_of_wasm_i8x16_splat_0x33, "wasm_i8x16_splat_0x33") \
- V(address_of_wasm_i8x16_splat_0x55, "wasm_i8x16_splat_0x55") \
- V(address_of_wasm_i16x8_splat_0x0001, "wasm_16x8_splat_0x0001") \
V(baseline_pc_for_bytecode_offset, "BaselinePCForBytecodeOffset") \
V(baseline_pc_for_next_executed_bytecode, \
"BaselinePCForNextExecutedBytecode") \
@@ -247,12 +240,21 @@ class StatsCounter;
IF_WASM(V, wasm_memory_init, "wasm::memory_init") \
IF_WASM(V, wasm_memory_copy, "wasm::memory_copy") \
IF_WASM(V, wasm_memory_fill, "wasm::memory_fill") \
+ IF_WASM(V, wasm_array_copy, "wasm::array_copy") \
+ V(address_of_wasm_i8x16_swizzle_mask, "wasm_i8x16_swizzle_mask") \
+ V(address_of_wasm_i8x16_popcnt_mask, "wasm_i8x16_popcnt_mask") \
+ V(address_of_wasm_i8x16_splat_0x01, "wasm_i8x16_splat_0x01") \
+ V(address_of_wasm_i8x16_splat_0x0f, "wasm_i8x16_splat_0x0f") \
+ V(address_of_wasm_i8x16_splat_0x33, "wasm_i8x16_splat_0x33") \
+ V(address_of_wasm_i8x16_splat_0x55, "wasm_i8x16_splat_0x55") \
+ V(address_of_wasm_i16x8_splat_0x0001, "wasm_16x8_splat_0x0001") \
V(address_of_wasm_f64x2_convert_low_i32x4_u_int_mask, \
"wasm_f64x2_convert_low_i32x4_u_int_mask") \
V(supports_wasm_simd_128_address, "wasm::supports_wasm_simd_128_address") \
V(address_of_wasm_double_2_power_52, "wasm_double_2_power_52") \
V(address_of_wasm_int32_max_as_double, "wasm_int32_max_as_double") \
V(address_of_wasm_uint32_max_as_double, "wasm_uint32_max_as_double") \
+ V(address_of_wasm_int32_overflow_as_float, "wasm_int32_overflow_as_float") \
V(write_barrier_marking_from_code_function, "WriteBarrier::MarkingFromCode") \
V(call_enqueue_microtask_function, "MicrotaskQueue::CallEnqueueMicrotask") \
V(call_enter_context_function, "call_enter_context_function") \
@@ -274,6 +276,14 @@ class StatsCounter;
"tsan_relaxed_store_function_32_bits") \
IF_TSAN(V, tsan_relaxed_store_function_64_bits, \
"tsan_relaxed_store_function_64_bits") \
+ IF_TSAN(V, tsan_seq_cst_store_function_8_bits, \
+ "tsan_seq_cst_store_function_8_bits") \
+ IF_TSAN(V, tsan_seq_cst_store_function_16_bits, \
+ "tsan_seq_cst_store_function_16_bits") \
+ IF_TSAN(V, tsan_seq_cst_store_function_32_bits, \
+ "tsan_seq_cst_store_function_32_bits") \
+ IF_TSAN(V, tsan_seq_cst_store_function_64_bits, \
+ "tsan_seq_cst_store_function_64_bits") \
IF_TSAN(V, tsan_relaxed_load_function_32_bits, \
"tsan_relaxed_load_function_32_bits") \
IF_TSAN(V, tsan_relaxed_load_function_64_bits, \
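
Editor's note: the seq_cst entries added above extend the existing TSAN helper scheme. Because ThreadSanitizer cannot observe stores performed by generated code, the generated code additionally calls a C++ function that repeats the store with the intended memory order so the race detector records it. A rough stand-alone sketch of that idea, with std::atomic_ref (C++20) standing in for V8's base::SeqCst_Store:

    #include <atomic>
    #include <cstdint>

    // Repeat a 32-bit store (already performed by generated code) through an
    // instrumented atomic operation so TSAN sees it with seq_cst ordering.
    void TsanSeqCstStore32(int32_t* addr, int32_t value) {
      std::atomic_ref<int32_t>(*addr).store(value, std::memory_order_seq_cst);
    }
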
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 90f8e8b70c..e921c11552 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -291,6 +291,8 @@ Register Operand::reg() const {
return Register::from_code(buf_[0] & 0x07);
}
+bool operator!=(Operand op, XMMRegister r) { return !op.is_reg(r); }
+
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
@@ -688,6 +690,14 @@ void Assembler::movq(XMMRegister dst, Operand src) {
emit_operand(dst, src);
}
+void Assembler::movq(Operand dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xD6);
+ emit_operand(src, dst);
+}
+
void Assembler::cmov(Condition cc, Register dst, Operand src) {
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index 89a65ee99b..31fc2c0221 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -306,6 +306,8 @@ ASSERT_TRIVIALLY_COPYABLE(Operand);
static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
"Operand must be small enough to pass it by value");
+bool operator!=(Operand op, XMMRegister r);
+
// -----------------------------------------------------------------------------
// A Displacement describes the 32bit immediate field of an instruction which
// may be used together with a Label in order to refer to a yet unknown code
@@ -535,6 +537,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movzx_w(Register dst, Operand src);
void movq(XMMRegister dst, Operand src);
+ void movq(Operand dst, XMMRegister src);
// Conditional moves
void cmov(Condition cc, Register dst, Register src) {
@@ -1544,6 +1547,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmovdqa(XMMRegister dst, Operand src) {
vinstr(0x6F, dst, xmm0, src, k66, k0F, kWIG);
}
+ void vmovdqa(XMMRegister dst, XMMRegister src) {
+ vinstr(0x6F, dst, xmm0, src, k66, k0F, kWIG);
+ }
void vmovdqu(XMMRegister dst, Operand src) {
vinstr(0x6F, dst, xmm0, src, kF3, k0F, kWIG);
}
@@ -1709,6 +1715,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
PACKED_CMP_LIST(AVX_CMP_P)
+ // vcmpgeps/vcmpgepd only in AVX.
+ AVX_CMP_P(cmpge, 0xd)
#undef AVX_CMP_P
#undef PACKED_CMP_LIST
@@ -1790,6 +1798,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE4_RM_INSTRUCTION_LIST(DECLARE_SSE4_AVX_RM_INSTRUCTION)
#undef DECLARE_SSE4_AVX_RM_INSTRUCTION
+ // AVX2 instructions
+#define AVX2_INSTRUCTION(instr, prefix, escape1, escape2, opcode) \
+ void instr(XMMRegister dst, XMMRegister src) { \
+ vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0, \
+ AVX2); \
+ } \
+ void instr(XMMRegister dst, Operand src) { \
+ vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0, \
+ AVX2); \
+ }
+ AVX2_BROADCAST_LIST(AVX2_INSTRUCTION)
+#undef AVX2_INSTRUCTION
+
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
// non-temporal
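
Editor's note: for readers tracing the new AVX2_INSTRUCTION macro, this is roughly what it expands to for one entry of AVX2_BROADCAST_LIST (defined in sse-instr.h later in this patch), namely V(vpbroadcastd, 66, 0F, 38, 58). The token pasting turns the prefix and escape columns into the k66 and k0F38 arguments of vinstr, with AVX2 as the required CPU feature.

    // Approximate expansion of AVX2_INSTRUCTION(vpbroadcastd, 66, 0F, 38, 58).
    void vpbroadcastd(XMMRegister dst, XMMRegister src) {
      vinstr(0x58, dst, xmm0, src, k66, k0F38, kW0, AVX2);
    }
    void vpbroadcastd(XMMRegister dst, Operand src) {
      vinstr(0x58, dst, xmm0, src, k66, k0F38, kW0, AVX2);
    }
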
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index c95ea8ad2c..e11d6223ea 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -631,319 +631,6 @@ void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
add(dst, Immediate(0x80000000));
}
-void TurboAssembler::Pmulhrsw(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmulhrsw(dst, src1, src2);
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- CpuFeatureScope sse_scope(this, SSSE3);
- pmulhrsw(dst, src2);
- }
-}
-
-void TurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister scratch) {
- ASM_CODE_COMMENT(this);
- // k = i16x8.splat(0x8000)
- Pcmpeqd(scratch, scratch);
- Psllw(scratch, scratch, byte{15});
-
- Pmulhrsw(dst, src1, src2);
- Pcmpeqw(scratch, dst);
- Pxor(dst, scratch);
-}
-
-void TurboAssembler::I8x16Popcnt(XMMRegister dst, XMMRegister src,
- XMMRegister tmp1, XMMRegister tmp2,
- Register scratch) {
- ASM_CODE_COMMENT(this);
- DCHECK_NE(dst, tmp1);
- DCHECK_NE(src, tmp1);
- DCHECK_NE(dst, tmp2);
- DCHECK_NE(src, tmp2);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqa(tmp1, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f(),
- scratch));
- vpandn(tmp2, tmp1, src);
- vpand(dst, tmp1, src);
- vmovdqa(tmp1, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_popcnt_mask(),
- scratch));
- vpsrlw(tmp2, tmp2, 4);
- vpshufb(dst, tmp1, dst);
- vpshufb(tmp2, tmp1, tmp2);
- vpaddb(dst, dst, tmp2);
- } else if (CpuFeatures::IsSupported(ATOM)) {
- // Pre-Goldmont low-power Intel microarchitectures have very slow
- // PSHUFB instruction, thus use PSHUFB-free divide-and-conquer
- // algorithm on these processors. ATOM CPU feature captures exactly
- // the right set of processors.
- movaps(tmp1, src);
- psrlw(tmp1, 1);
- if (dst != src) {
- movaps(dst, src);
- }
- andps(tmp1,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x55(), scratch));
- psubb(dst, tmp1);
- Operand splat_0x33 = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x33(), scratch);
- movaps(tmp1, dst);
- andps(dst, splat_0x33);
- psrlw(tmp1, 2);
- andps(tmp1, splat_0x33);
- paddb(dst, tmp1);
- movaps(tmp1, dst);
- psrlw(dst, 4);
- paddb(dst, tmp1);
- andps(dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f(), scratch));
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(tmp1,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f(), scratch));
- Operand mask = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_popcnt_mask(), scratch);
- if (tmp2 != tmp1) {
- movaps(tmp2, tmp1);
- }
- andps(tmp1, src);
- andnps(tmp2, src);
- psrlw(tmp2, 4);
- movaps(dst, mask);
- pshufb(dst, tmp1);
- movaps(tmp1, mask);
- pshufb(tmp1, tmp2);
- paddb(dst, tmp1);
- }
-}
-
-void TurboAssembler::F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src,
- Register tmp) {
- // dst = [ src_low, 0x43300000, src_high, 0x4330000 ];
- // 0x43300000'00000000 is a special double where the significand bits
- // precisely represents all uint32 numbers.
- if (!CpuFeatures::IsSupported(AVX) && dst != src) {
- movaps(dst, src);
- src = dst;
- }
- Unpcklps(dst, src,
- ExternalReferenceAsOperand(
- ExternalReference::
- address_of_wasm_f64x2_convert_low_i32x4_u_int_mask(),
- tmp));
- Subpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52(), tmp));
-}
-
-void TurboAssembler::I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
- XMMRegister scratch,
- Register tmp) {
- ASM_CODE_COMMENT(this);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- XMMRegister original_dst = dst;
- // Make sure we don't overwrite src.
- if (dst == src) {
- DCHECK_NE(scratch, src);
- dst = scratch;
- }
- // dst = 0 if src == NaN, else all ones.
- vcmpeqpd(dst, src, src);
- // dst = 0 if src == NaN, else INT32_MAX as double.
- vandpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
- // dst = 0 if src == NaN, src is saturated to INT32_MAX as double.
- vminpd(dst, src, dst);
- // Values > INT32_MAX already saturated, values < INT32_MIN raises an
- // exception, which is masked and returns 0x80000000.
- vcvttpd2dq(dst, dst);
-
- if (original_dst != dst) {
- vmovaps(original_dst, dst);
- }
- } else {
- if (dst != src) {
- movaps(dst, src);
- }
- movaps(scratch, dst);
- cmpeqpd(scratch, dst);
- andps(scratch,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
- minpd(dst, scratch);
- cvttpd2dq(dst, dst);
- }
-}
-
-void TurboAssembler::I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
- XMMRegister scratch,
- Register tmp) {
- ASM_CODE_COMMENT(this);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vxorpd(scratch, scratch, scratch);
- // Saturate to 0.
- vmaxpd(dst, src, scratch);
- // Saturate to UINT32_MAX.
- vminpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_uint32_max_as_double(), tmp));
- // Truncate.
- vroundpd(dst, dst, kRoundToZero);
- // Add to special double where significant bits == uint32.
- vaddpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52(), tmp));
- // Extract low 32 bits of each double's significand, zero top lanes.
- // dst = [dst[0], dst[2], 0, 0]
- vshufps(dst, dst, scratch, 0x88);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst != src) {
- movaps(dst, src);
- }
-
- xorps(scratch, scratch);
- maxpd(dst, scratch);
- minpd(dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_uint32_max_as_double(), tmp));
- roundpd(dst, dst, kRoundToZero);
- addpd(dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52(), tmp));
- shufps(dst, scratch, 0x88);
- }
-}
-
-void TurboAssembler::I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src,
- XMMRegister tmp,
- Register scratch) {
- // pmaddubsw treats the first operand as unsigned, so pass the external
- // reference to as the first operand.
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01(), scratch);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqa(tmp, op);
- vpmaddubsw(dst, tmp, src);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- if (dst == src) {
- movaps(tmp, op);
- pmaddubsw(tmp, src);
- movaps(dst, tmp);
- } else {
- movaps(dst, op);
- pmaddubsw(dst, src);
- }
- }
-}
-
-void TurboAssembler::I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src,
- Register scratch) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01(), scratch);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddubsw(dst, src, op);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(dst, src);
- pmaddubsw(dst, op);
- }
-}
-
-void TurboAssembler::I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
- Register scratch) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i16x8_splat_0x0001(), scratch);
- // pmaddwd multiplies signed words in src and op, producing
- // signed doublewords, then adds pairwise.
- // src = |a|b|c|d|e|f|g|h|
- // dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
- Pmaddwd(dst, src, op);
-}
-
-void TurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
- XMMRegister tmp) {
- ASM_CODE_COMMENT(this);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // src = |a|b|c|d|e|f|g|h| (low)
- // scratch = |0|a|0|c|0|e|0|g|
- vpsrld(tmp, src, 16);
- // dst = |0|b|0|d|0|f|0|h|
- vpblendw(dst, src, tmp, 0xAA);
- // dst = |a+b|c+d|e+f|g+h|
- vpaddd(dst, tmp, dst);
- } else if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- // There is a potentially better lowering if we get rip-relative constants,
- // see https://github.com/WebAssembly/simd/pull/380.
- movaps(tmp, src);
- psrld(tmp, 16);
- if (dst != src) {
- movaps(dst, src);
- }
- pblendw(dst, tmp, 0xAA);
- paddd(dst, tmp);
- } else {
- // src = |a|b|c|d|e|f|g|h|
- // tmp = i32x4.splat(0x0000FFFF)
- pcmpeqd(tmp, tmp);
- psrld(tmp, byte{16});
- // tmp =|0|b|0|d|0|f|0|h|
- andps(tmp, src);
- // dst = |0|a|0|c|0|e|0|g|
- if (dst != src) {
- movaps(dst, src);
- }
- psrld(dst, byte{16});
- // dst = |a+b|c+d|e+f|g+h|
- paddd(dst, tmp);
- }
-}
-
-void TurboAssembler::I8x16Swizzle(XMMRegister dst, XMMRegister src,
- XMMRegister mask, XMMRegister scratch,
- Register tmp, bool omit_add) {
- if (omit_add) {
- Pshufb(dst, src, mask);
- return;
- }
-
- // Out-of-range indices should return 0, add 112 so that any value > 15
- // saturates to 128 (top bit set), so pshufb will zero that lane.
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_swizzle_mask(), tmp);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpaddusb(scratch, mask, op);
- vpshufb(dst, src, scratch);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(scratch, op);
- if (dst != src) {
- movaps(dst, src);
- }
- paddusb(scratch, mask);
- pshufb(dst, scratch);
- }
-}
-
void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
DCHECK_GE(63, shift);
if (shift >= 32) {
@@ -1584,8 +1271,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
lea(scratch,
Operand(expected_parameter_count, times_system_pointer_size, 0));
AllocateStackSpace(scratch);
- // Extra words are the receiver and the return address (if a jump).
- int extra_words = type == InvokeType::kCall ? 1 : 2;
+ // Extra words are the receiver (if not already included in argc) and the
+ // return address (if a jump).
+ int extra_words = type == InvokeType::kCall ? 0 : 1;
+ if (!kJSArgcIncludesReceiver) extra_words++;
lea(num, Operand(eax, extra_words)); // Number of words to copy.
Move(current, 0);
// Fall-through to the loop body because there are non-zero words to copy.
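
Editor's note: the InvokePrologue hunk above recomputes the number of stack words to copy as argc itself, plus the return address when invoking via jump, plus one receiver slot only when argc does not already include it. A stand-alone restatement of that arithmetic:

    // Sketch of the word count used in InvokePrologue; the boolean mirrors the
    // kJSArgcIncludesReceiver build-time setting assumed elsewhere in this patch.
    constexpr int WordsToCopy(int argc, bool is_jump, bool argc_includes_receiver) {
      int extra = is_jump ? 1 : 0;              // return address
      if (!argc_includes_receiver) extra += 1;  // receiver slot
      return argc + extra;
    }
    static_assert(WordsToCopy(3, false, true) == 3,
                  "call with the receiver already counted in argc");
    static_assert(WordsToCopy(3, true, false) == 5,
                  "jump adds the return address plus the receiver");
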
@@ -1895,22 +1584,6 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
-void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src, Operand mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpshufb(dst, src, mask);
- return;
- }
-
- // Make sure these are different so that we won't overwrite mask.
- DCHECK(!mask.is_reg(dst));
- CpuFeatureScope sse_scope(this, SSSE3);
- if (dst != src) {
- movaps(dst, src);
- }
- pshufb(dst, mask);
-}
-
void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
@@ -2015,16 +1688,6 @@ void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
}
}
-void TurboAssembler::Vbroadcastss(XMMRegister dst, Operand src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vbroadcastss(dst, src);
- return;
- }
- movss(dst, src);
- shufps(dst, dst, static_cast<byte>(0));
-}
-
void TurboAssembler::Lzcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
@@ -2385,63 +2048,6 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
jmp(code_object, rmode);
}
-void TurboAssembler::RetpolineCall(Register reg) {
- ASM_CODE_COMMENT(this);
- Label setup_return, setup_target, inner_indirect_branch, capture_spec;
-
- jmp(&setup_return); // Jump past the entire retpoline below.
-
- bind(&inner_indirect_branch);
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- mov(Operand(esp, 0), reg);
- ret(0);
-
- bind(&setup_return);
- call(&inner_indirect_branch); // Callee will return after this instruction.
-}
-
-void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
- ASM_CODE_COMMENT(this);
- Label setup_return, setup_target, inner_indirect_branch, capture_spec;
-
- jmp(&setup_return); // Jump past the entire retpoline below.
-
- bind(&inner_indirect_branch);
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- mov(Operand(esp, 0), destination, rmode);
- ret(0);
-
- bind(&setup_return);
- call(&inner_indirect_branch); // Callee will return after this instruction.
-}
-
-void TurboAssembler::RetpolineJump(Register reg) {
- ASM_CODE_COMMENT(this);
- Label setup_target, capture_spec;
-
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- mov(Operand(esp, 0), reg);
- ret(0);
-}
-
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
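
Editor's note: the large deletion above removes the IA-32 Wasm SIMD helpers from this TurboAssembler (the header change below rebases the class onto a shared TurboAssembler base, which presumably now hosts them). The non-PSHUFB I8x16Popcnt fallback that disappears here documents a classic divide-and-conquer population count driven by the 0x55 / 0x33 / 0x0f splat constants; a scalar, per-byte version of the same bit trick, purely for illustration and not V8 code:

    #include <cassert>
    #include <cstdint>

    // Stand-alone scalar equivalent of the deleted lane-wise algorithm.
    uint8_t popcount8(uint8_t x) {
      x = x - ((x >> 1) & 0x55);           // sums of adjacent bit pairs
      x = (x & 0x33) + ((x >> 2) & 0x33);  // sums per 4-bit nibble
      x = (x + (x >> 4)) & 0x0f;           // total bits set in the byte
      return x;
    }

    int main() {
      assert(popcount8(0x00) == 0);
      assert(popcount8(0xff) == 8);
      assert(popcount8(0xb4) == 4);  // 0b1011'0100 has four bits set
      return 0;
    }
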
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 527c357047..bf8f356e8c 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -68,9 +68,10 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
-class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
+class V8_EXPORT_PRIVATE TurboAssembler
+ : public SharedTurboAssemblerBase<TurboAssembler> {
public:
- using SharedTurboAssembler::SharedTurboAssembler;
+ using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase;
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
@@ -158,15 +159,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
JumpMode jump_mode = JumpMode::kJump);
void Jump(const ExternalReference& reference);
- void RetpolineCall(Register reg);
- void RetpolineCall(Address destination, RelocInfo::Mode rmode);
-
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void LoadMap(Register destination, Register object);
- void RetpolineJump(Register reg);
-
void Trap();
void DebugBreak();
@@ -326,10 +322,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
name(dst, src2); \
} \
}
- AVX_OP3_WITH_MOVE(Cmpeqps, cmpeqps, XMMRegister, XMMRegister)
AVX_OP3_WITH_MOVE(Movlps, movlps, XMMRegister, Operand)
AVX_OP3_WITH_MOVE(Movhps, movhps, XMMRegister, Operand)
- AVX_OP3_WITH_MOVE(Pmaddwd, pmaddwd, XMMRegister, Operand)
#undef AVX_OP3_WITH_MOVE
// TODO(zhin): Remove after moving more definitions into SharedTurboAssembler.
@@ -340,14 +334,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
SharedTurboAssembler::Movhps(dst, src);
}
- void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, dst, src); }
- void Pshufb(XMMRegister dst, Operand src) { Pshufb(dst, dst, src); }
- // Handles SSE and AVX. On SSE, moves src to dst if they are not equal.
- void Pshufb(XMMRegister dst, XMMRegister src, XMMRegister mask) {
- Pshufb(dst, src, Operand(mask));
- }
- void Pshufb(XMMRegister dst, XMMRegister src, Operand mask);
-
void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
void Pinsrb(XMMRegister dst, Register src, int8_t imm8) {
Pinsrb(dst, Operand(src), imm8);
@@ -367,7 +353,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Pinsrw(XMMRegister dst, Operand src, int8_t imm8);
// Moves src1 to dst if AVX is not supported.
void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8);
- void Vbroadcastss(XMMRegister dst, Operand src);
// Expression support
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
@@ -395,32 +380,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
}
void Cvttsd2ui(Register dst, Operand src, XMMRegister tmp);
- // Handles SSE and AVX. On SSE, moves src to dst if they are not equal.
- void Pmulhrsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
-
- // These Wasm SIMD ops do not have direct lowerings on IA32. These
- // helpers are optimized to produce the fastest and smallest codegen.
- // Defined here to allow usage on both TurboFan and Liftoff.
- void I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister scratch);
- void I8x16Popcnt(XMMRegister dst, XMMRegister src, XMMRegister tmp1,
- XMMRegister tmp2, Register scratch);
- void F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src, Register tmp);
- void I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
- XMMRegister scratch, Register tmp);
- void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
- XMMRegister scratch, Register tmp);
- void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src,
- XMMRegister tmp, Register scratch);
- void I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src,
- Register scratch);
- void I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
- Register scratch);
- void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
- XMMRegister tmp);
- void I8x16Swizzle(XMMRegister dst, XMMRegister src, XMMRegister mask,
- XMMRegister scratch, Register tmp, bool omit_add = false);
-
void Push(Register src) { push(src); }
void Push(Operand src) { push(src); }
void Push(Immediate value);
@@ -480,9 +439,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- void ResetSpeculationPoisonRegister() { UNREACHABLE(); }
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
diff --git a/deps/v8/src/codegen/ia32/register-ia32.h b/deps/v8/src/codegen/ia32/register-ia32.h
index 5dc035d966..37a5783ded 100644
--- a/deps/v8/src/codegen/ia32/register-ia32.h
+++ b/deps/v8/src/codegen/ia32/register-ia32.h
@@ -161,9 +161,6 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = edi;
constexpr Register kRootRegister = ebx;
-// TODO(860429): Remove remaining poisoning infrastructure on ia32.
-constexpr Register kSpeculationPoisonRegister = no_reg;
-
constexpr DoubleRegister kFPReturnRegister0 = xmm1; // xmm0 isn't allocatable.
} // namespace internal
diff --git a/deps/v8/src/codegen/ia32/sse-instr.h b/deps/v8/src/codegen/ia32/sse-instr.h
index d775dfdd77..ef81e1014f 100644
--- a/deps/v8/src/codegen/ia32/sse-instr.h
+++ b/deps/v8/src/codegen/ia32/sse-instr.h
@@ -102,4 +102,10 @@
V(pmovzxdq, 66, 0F, 38, 35) \
V(ptest, 66, 0F, 38, 17)
+// These require AVX2, and we only define the VEX-128 versions.
+#define AVX2_BROADCAST_LIST(V) \
+ V(vpbroadcastd, 66, 0F, 38, 58) \
+ V(vpbroadcastb, 66, 0F, 38, 78) \
+ V(vpbroadcastw, 66, 0F, 38, 79)
+
#endif // V8_CODEGEN_IA32_SSE_INSTR_H_
diff --git a/deps/v8/src/codegen/interface-descriptors-inl.h b/deps/v8/src/codegen/interface-descriptors-inl.h
index cf4ff5b0e6..d5a8ccf6e4 100644
--- a/deps/v8/src/codegen/interface-descriptors-inl.h
+++ b/deps/v8/src/codegen/interface-descriptors-inl.h
@@ -27,6 +27,8 @@
#include "src/codegen/mips64/interface-descriptors-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/interface-descriptors-mips-inl.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/interface-descriptors-loong64-inl.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/codegen/riscv64/interface-descriptors-riscv64-inl.h"
#else
@@ -318,9 +320,10 @@ constexpr auto LoadWithReceiverBaselineDescriptor::registers() {
// static
constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
// TODO(v8:11421): Implement on other platforms.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
- V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
+ V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_LOONG64
return RegisterArray(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
kJavaScriptCallExtraArg1Register, kJavaScriptCallNewTargetRegister,
@@ -341,7 +344,7 @@ constexpr auto BaselineLeaveFrameDescriptor::registers() {
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64
return RegisterArray(ParamsSizeRegister(), WeightRegister());
#else
return DefaultRegisterArray();
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index cf4840bfd7..87bef49f37 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -111,8 +111,8 @@ namespace internal {
V(StringAt) \
V(StringAtAsString) \
V(StringSubstring) \
- IF_TSAN(V, TSANRelaxedStore) \
- IF_TSAN(V, TSANRelaxedLoad) \
+ IF_TSAN(V, TSANStore) \
+ IF_TSAN(V, TSANLoad) \
V(TypeConversion) \
V(TypeConversionNoContext) \
V(TypeConversion_Baseline) \
@@ -1053,26 +1053,26 @@ class WriteBarrierDescriptor final
};
#ifdef V8_IS_TSAN
-class TSANRelaxedStoreDescriptor final
- : public StaticCallInterfaceDescriptor<TSANRelaxedStoreDescriptor> {
+class TSANStoreDescriptor final
+ : public StaticCallInterfaceDescriptor<TSANStoreDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kValue)
DEFINE_PARAMETER_TYPES(MachineType::Pointer(), // kAddress
MachineType::AnyTagged()) // kValue
- DECLARE_DESCRIPTOR(TSANRelaxedStoreDescriptor)
+ DECLARE_DESCRIPTOR(TSANStoreDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
};
-class TSANRelaxedLoadDescriptor final
- : public StaticCallInterfaceDescriptor<TSANRelaxedLoadDescriptor> {
+class TSANLoadDescriptor final
+ : public StaticCallInterfaceDescriptor<TSANLoadDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress)
DEFINE_PARAMETER_TYPES(MachineType::Pointer()) // kAddress
- DECLARE_DESCRIPTOR(TSANRelaxedLoadDescriptor)
+ DECLARE_DESCRIPTOR(TSANLoadDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64-inl.h b/deps/v8/src/codegen/loong64/assembler-loong64-inl.h
new file mode 100644
index 0000000000..597d5e048e
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/assembler-loong64-inl.h
@@ -0,0 +1,249 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
+#define V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/loong64/assembler-loong64.h"
+#include "src/debug/debug.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
+
+// -----------------------------------------------------------------------------
+// Operand and MemOperand.
+
+bool Operand::is_reg() const { return rm_.is_valid(); }
+
+int64_t Operand::immediate() const {
+ DCHECK(!is_reg());
+ DCHECK(!IsHeapObjectRequest());
+ return value_.immediate;
+}
+
+// -----------------------------------------------------------------------------
+// RelocInfo.
+
+void RelocInfo::apply(intptr_t delta) {
+ if (IsInternalReference(rmode_)) {
+ // Absolute code pointer inside code object moves with the code object.
+ Assembler::RelocateInternalReference(rmode_, pc_, delta);
+ } else {
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ Assembler::RelocateRelativeReference(rmode_, pc_, delta);
+ }
+}
+
+Address RelocInfo::target_address() {
+ DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
+ IsWasmCall(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+Address RelocInfo::target_address_address() {
+ DCHECK(HasTargetAddressAddress());
+ // Read the address of the word containing the target_address in an
+ // instruction stream.
+ // The only architecture-independent user of this function is the serializer.
+ // The serializer uses it to find out how many raw bytes of instruction to
+ // output before the next target.
+ // For an instruction like LUI/ORI where the target bits are mixed into the
+ // instruction bits, the size of the target will be zero, indicating that the
+ // serializer should not step forward in memory after a target is resolved
+ // and written. In this case the target_address_address function should
+ // return the end of the instructions to be patched, allowing the
+ // deserializer to deserialize the instructions as raw bytes and put them in
+ // place, ready to be patched with the target. After jump optimization,
+ // that is the address of the instruction that follows J/JAL/JR/JALR
+ // instruction.
+ return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize;
+}
+
+Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
+
+int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+
+void Assembler::deserialization_set_special_target_at(
+ Address instruction_payload, Code code, Address target) {
+ set_target_address_at(instruction_payload,
+ !code.is_null() ? code.constant_pool() : kNullAddress,
+ target);
+}
+
+int Assembler::deserialization_special_target_size(
+ Address instruction_payload) {
+ return kSpecialTargetSize;
+}
+
+void Assembler::deserialization_set_target_internal_reference_at(
+ Address pc, Address target, RelocInfo::Mode mode) {
+ WriteUnalignedValue<Address>(pc, target);
+}
+
+HeapObject RelocInfo::target_object() {
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
+ IsDataEmbeddedObject(rmode_));
+ if (IsDataEmbeddedObject(rmode_)) {
+ return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
+ }
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
+}
+
+HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+ return target_object();
+}
+
+Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
+ if (IsDataEmbeddedObject(rmode_)) {
+ return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
+ } else if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
+ } else {
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ return origin->relative_code_target_object_handle_at(pc_);
+ }
+}
+
+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
+ IsDataEmbeddedObject(rmode_));
+ if (IsDataEmbeddedObject(rmode_)) {
+ WriteUnalignedValue(pc_, target.ptr());
+ // No need to flush icache since no instructions were changed.
+ } else {
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
+ icache_flush_mode);
+ }
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
+ WriteBarrierForCode(host(), this, target);
+ }
+}
+
+Address RelocInfo::target_external_reference() {
+ DCHECK(rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+void RelocInfo::set_target_external_reference(
+ Address target, ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
+
+Address RelocInfo::target_internal_reference() {
+ if (rmode_ == INTERNAL_REFERENCE) {
+ return Memory<Address>(pc_);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+Address RelocInfo::target_internal_reference_address() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE);
+ return pc_;
+}
+
+Handle<Code> Assembler::relative_code_target_object_handle_at(
+ Address pc) const {
+ Instr instr = Assembler::instr_at(pc);
+ int32_t code_target_index = instr & kImm26Mask;
+ code_target_index = ((code_target_index & 0x3ff) << 22 >> 6) |
+ ((code_target_index >> 10) & kImm16Mask);
+ return GetCodeTarget(code_target_index);
+}
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ if (target_address() != target)
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+}
+
+Address RelocInfo::target_off_heap_target() {
+ DCHECK(IsOffHeapTarget(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+void RelocInfo::WipeOut() {
+ DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
+ IsInternalReference(rmode_) || IsOffHeapTarget(rmode_));
+ if (IsInternalReference(rmode_)) {
+ Memory<Address>(pc_) = kNullAddress;
+ } else {
+ Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Assembler.
+
+void Assembler::CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+}
+
+void Assembler::EmitHelper(Instr x) {
+ *reinterpret_cast<Instr*>(pc_) = x;
+ pc_ += kInstrSize;
+ CheckTrampolinePoolQuick();
+}
+
+template <>
+inline void Assembler::EmitHelper(uint8_t x);
+
+template <typename T>
+void Assembler::EmitHelper(T x) {
+ *reinterpret_cast<T*>(pc_) = x;
+ pc_ += sizeof(x);
+ CheckTrampolinePoolQuick();
+}
+
+template <>
+void Assembler::EmitHelper(uint8_t x) {
+ *reinterpret_cast<uint8_t*>(pc_) = x;
+ pc_ += sizeof(x);
+ if (reinterpret_cast<intptr_t>(pc_) % kInstrSize == 0) {
+ CheckTrampolinePoolQuick();
+ }
+}
+
+void Assembler::emit(Instr x) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ EmitHelper(x);
+}
+
+void Assembler::emit(uint64_t data) {
+ // CheckForEmitInForbiddenSlot();
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ EmitHelper(data);
+}
+
+EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
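
Editor's note: relative_code_target_object_handle_at above stitches a 26-bit value back together from the LoongArch B/BL encoding, where instr[25:10] holds offs[15:0] and instr[9:0] holds offs[25:16], and the shift pair sign-extends from bit 25. A stand-alone sketch of the same decode; the mask values are assumptions mirroring kImm16Mask and the 10-bit high field, and this is not V8 code.

    #include <cassert>
    #include <cstdint>

    int32_t DecodeOffs26(uint32_t instr) {
      uint32_t lo16 = (instr >> 10) & 0xFFFFu;  // offs[15:0]
      uint32_t hi10 = instr & 0x3FFu;           // offs[25:16]
      // Place the high part at bits 31:22, then arithmetic-shift down to 25:16
      // so the sign bit of the 26-bit field propagates into the upper bits.
      int32_t offs = static_cast<int32_t>(hi10 << 22) >> 6;
      return offs | static_cast<int32_t>(lo16);
    }

    int main() {
      // offs = +0x12345: instr[25:10] = 0x2345, instr[9:0] = 0x001.
      assert(DecodeOffs26((0x2345u << 10) | 0x001u) == 0x12345);
      // offs = -4 (0x3FFFFFC as a 26-bit field) keeps its sign after decoding.
      assert(DecodeOffs26((0xFFFCu << 10) | 0x3FFu) == -4);
      return 0;
    }
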
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.cc b/deps/v8/src/codegen/loong64/assembler-loong64.cc
new file mode 100644
index 0000000000..cc1eaa7d12
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/assembler-loong64.cc
@@ -0,0 +1,2405 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/loong64/assembler-loong64.h"
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/base/cpu.h"
+#include "src/codegen/loong64/assembler-loong64-inl.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/codegen/string-constants.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/objects/heap-number-inl.h"
+
+namespace v8 {
+namespace internal {
+
+bool CpuFeatures::SupportsWasmSimd128() { return false; }
+
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ supported_ |= 1u << FPU;
+
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
+
+#ifdef __loongarch__
+ // Probe for additional features at runtime.
+ base::CPU cpu;
+ supported_ |= 1u << FPU;
+#endif
+
+ // Set a static value on whether Simd is supported.
+ // This variable is only used for certain archs to query SupportWasmSimd128()
+ // at runtime in builtins using an extern ref. Other callers should use
+ // CpuFeatures::SupportWasmSimd128().
+ CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
+}
+
+void CpuFeatures::PrintTarget() {}
+void CpuFeatures::PrintFeatures() {}
+
+int ToNumber(Register reg) {
+ DCHECK(reg.is_valid());
+ const int kNumbers[] = {
+ 0, // zero_reg
+ 1, // ra
+ 2, // tp
+ 3, // sp
+ 4, // a0 v0
+ 5, // a1 v1
+ 6, // a2
+ 7, // a3
+ 8, // a4
+ 9, // a5
+ 10, // a6
+ 11, // a7
+ 12, // t0
+ 13, // t1
+ 14, // t2
+ 15, // t3
+ 16, // t4
+ 17, // t5
+ 18, // t6
+ 19, // t7
+ 20, // t8
+ 21, // x_reg
+ 22, // fp
+ 23, // s0
+ 24, // s1
+ 25, // s2
+ 26, // s3
+ 27, // s4
+ 28, // s5
+ 29, // s6
+ 30, // s7
+ 31, // s8
+ };
+ return kNumbers[reg.code()];
+}
+
+Register ToRegister(int num) {
+ DCHECK(num >= 0 && num < kNumRegisters);
+ const Register kRegisters[] = {
+ zero_reg, ra, tp, sp, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3,
+ t4, t5, t6, t7, t8, x_reg, fp, s0, s1, s2, s3, s4, s5, s6, s7, s8};
+ return kRegisters[num];
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo.
+
+const int RelocInfo::kApplyMask =
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on LoongArch64 means that it is a lu12i_w/ori instruction,
+ // and that is always the case inside code objects.
+ return true;
+}
+
+bool RelocInfo::IsInConstantPool() { return false; }
+
+uint32_t RelocInfo::wasm_call_tag() const {
+ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
+ return static_cast<uint32_t>(
+ Assembler::target_address_at(pc_, constant_pool_));
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand.
+// See assembler-loong64-inl.h for inlined constructors.
+
+Operand::Operand(Handle<HeapObject> handle)
+ : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
+ value_.immediate = static_cast<intptr_t>(handle.address());
+}
+
+Operand Operand::EmbeddedNumber(double value) {
+ int32_t smi;
+ if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(value);
+ return result;
+}
+
+Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(str);
+ return result;
+}
+
+MemOperand::MemOperand(Register base, int32_t offset)
+ : base_(base), index_(no_reg), offset_(offset) {}
+
+MemOperand::MemOperand(Register base, Register index)
+ : base_(base), index_(index), offset_(0) {}
+
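+// Allocates the requested heap numbers and string constants and patches each
+// recorded pc offset with the location of the resulting handle.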
+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
+ for (auto& request : heap_object_requests_) {
+ Handle<HeapObject> object;
+ switch (request.kind()) {
+ case HeapObjectRequest::kHeapNumber:
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
+ break;
+ case HeapObjectRequest::kStringConstant:
+ const StringConstantBase* str = request.string();
+ CHECK_NOT_NULL(str);
+ object = str->AllocateStringConstant(isolate);
+ break;
+ }
+ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
+ set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+
+Assembler::Assembler(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : AssemblerBase(options, std::move(buffer)),
+ scratch_register_list_(t7.bit() | t6.bit()) {
+ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
+
+ last_trampoline_pool_end_ = 0;
+ no_trampoline_pool_before_ = 0;
+ trampoline_pool_blocked_nesting_ = 0;
+ // We leave space (16 * kTrampolineSlotsSize)
+ // for BlockTrampolinePoolScope buffer.
+ next_buffer_check_ = FLAG_force_long_branches
+ ? kMaxInt
+ : kMax16BranchOffset - kTrampolineSlotsSize * 16;
+ internal_trampoline_exception_ = false;
+ last_bound_pos_ = 0;
+
+ trampoline_emitted_ = FLAG_force_long_branches;
+ unbound_labels_count_ = 0;
+ block_buffer_growth_ = false;
+}
+
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyway.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
+ // TODO(LOONG_dev): Is EmitForbiddenSlotInstruction() needed here?
+
+ int code_comments_size = WriteCodeComments();
+
+ DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
+
+ AllocateAndInstallRequestedHeapObjects(isolate);
+
+ // Set up code descriptor.
+ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
+ // this point to make CodeDesc initialization less fiddly.
+
+ static constexpr int kConstantPoolSize = 0;
+ const int instruction_size = pc_offset();
+ const int code_comments_offset = instruction_size - code_comments_size;
+ const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
+ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
+ ? constant_pool_offset
+ : handler_table_offset;
+ const int safepoint_table_offset =
+ (safepoint_table_builder == kNoSafepointTable)
+ ? handler_table_offset2
+ : safepoint_table_builder->GetCodeOffset();
+ const int reloc_info_offset =
+ static_cast<int>(reloc_info_writer.pos() - buffer_->start());
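+
+ // The offsets computed above describe a code object laid out as:
+ // instructions, safepoint table, handler table, (empty) constant pool,
+ // code comments.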
+ CodeDesc::Initialize(desc, this, safepoint_table_offset,
+ handler_table_offset2, constant_pool_offset,
+ code_comments_offset, reloc_info_offset);
+}
+
+void Assembler::Align(int m) {
+ // If not, the loop below won't terminate.
+ DCHECK(IsAligned(pc_offset(), kInstrSize));
+ DCHECK(m >= kInstrSize && base::bits::IsPowerOfTwo(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+void Assembler::CodeTargetAlign() {
+ // There is no advantage to aligning branch/call targets to more than a
+ // single instruction, as far as we are aware.
+ Align(4);
+}
+
+Register Assembler::GetRkReg(Instr instr) {
+ return Register::from_code((instr & kRkFieldMask) >> kRkShift);
+}
+
+Register Assembler::GetRjReg(Instr instr) {
+ return Register::from_code((instr & kRjFieldMask) >> kRjShift);
+}
+
+Register Assembler::GetRdReg(Instr instr) {
+ return Register::from_code((instr & kRdFieldMask) >> kRdShift);
+}
+
+uint32_t Assembler::GetRk(Instr instr) {
+ return (instr & kRkFieldMask) >> kRkShift;
+}
+
+uint32_t Assembler::GetRkField(Instr instr) { return instr & kRkFieldMask; }
+
+uint32_t Assembler::GetRj(Instr instr) {
+ return (instr & kRjFieldMask) >> kRjShift;
+}
+
+uint32_t Assembler::GetRjField(Instr instr) { return instr & kRjFieldMask; }
+
+uint32_t Assembler::GetRd(Instr instr) {
+ return (instr & kRdFieldMask) >> kRdShift;
+}
+
+uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; }
+
+uint32_t Assembler::GetSa2(Instr instr) {
+ return (instr & kSa2FieldMask) >> kSaShift;
+}
+
+uint32_t Assembler::GetSa2Field(Instr instr) { return instr & kSa2FieldMask; }
+
+uint32_t Assembler::GetSa3(Instr instr) {
+ return (instr & kSa3FieldMask) >> kSaShift;
+}
+
+uint32_t Assembler::GetSa3Field(Instr instr) { return instr & kSa3FieldMask; }
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+// The link chain is terminated by an offset value of 0 in the instruction,
+// which is an otherwise illegal value (a branch to offset 0 is an infinite
+// loop). The instruction's 16-bit offset field addresses 32-bit words, but in
+// code it is converted to an 18-bit value addressing bytes; hence the -4
+// sentinel used to represent a link to position 0 (see AddBranchOffset and
+// Assembler::next below).
+
+const int kEndOfChain = 0;
+// Determines the end of the Jump chain (a subset of the label link chain).
+const int kEndOfJumpChain = 0;
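+
+// A branch emitted against a still-unbound label encodes the offset to the
+// label's previous use (or 0 at the end of the chain); bind_to() later walks
+// this chain via next()/target_at() and patches each instruction through
+// target_at_put().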
+
+bool Assembler::IsBranch(Instr instr) {
+ uint32_t opcode = (instr >> 26) << 26;
+ // Checks if the instruction is a branch.
+ bool isBranch = opcode == BEQZ || opcode == BNEZ || opcode == BCZ ||
+ opcode == B || opcode == BL || opcode == BEQ ||
+ opcode == BNE || opcode == BLT || opcode == BGE ||
+ opcode == BLTU || opcode == BGEU;
+ return isBranch;
+}
+
+bool Assembler::IsB(Instr instr) {
+ uint32_t opcode = (instr >> 26) << 26;
+ // Checks if the instruction is a b.
+ bool isBranch = opcode == B || opcode == BL;
+ return isBranch;
+}
+
+bool Assembler::IsBz(Instr instr) {
+ uint32_t opcode = (instr >> 26) << 26;
+ // Checks if the instruction is a branch.
+ bool isBranch = opcode == BEQZ || opcode == BNEZ || opcode == BCZ;
+ return isBranch;
+}
+
+bool Assembler::IsEmittedConstant(Instr instr) {
+ // Add GetLabelConst function?
+ uint32_t label_constant = instr & ~kImm16Mask;
+ return label_constant == 0; // Emitted label const in reg-exp engine.
+}
+
+bool Assembler::IsJ(Instr instr) {
+ uint32_t opcode = (instr >> 26) << 26;
+ // Checks if the instruction is a jump.
+ return opcode == JIRL;
+}
+
+bool Assembler::IsLu12i_w(Instr instr) {
+ uint32_t opcode = (instr >> 25) << 25;
+ return opcode == LU12I_W;
+}
+
+bool Assembler::IsOri(Instr instr) {
+ uint32_t opcode = (instr >> 22) << 22;
+ return opcode == ORI;
+}
+
+bool Assembler::IsLu32i_d(Instr instr) {
+ uint32_t opcode = (instr >> 25) << 25;
+ return opcode == LU32I_D;
+}
+
+bool Assembler::IsLu52i_d(Instr instr) {
+ uint32_t opcode = (instr >> 22) << 22;
+ return opcode == LU52I_D;
+}
+
+bool Assembler::IsMov(Instr instr, Register rd, Register rj) {
+ // Checks if the instruction is a OR with zero_reg argument (aka MOV).
+ Instr instr1 =
+ OR | zero_reg.code() << kRkShift | rj.code() << kRjShift | rd.code();
+ return instr == instr1;
+}
+
+bool Assembler::IsPcAddi(Instr instr, Register rd, int32_t si20) {
+ DCHECK(is_int20(si20));
+ Instr instr1 = PCADDI | (si20 & 0xfffff) << kRjShift | rd.code();
+ return instr == instr1;
+}
+
+bool Assembler::IsNop(Instr instr, unsigned int type) {
+ // See Assembler::nop(type).
+ DCHECK_LT(type, 32);
+
+ Instr instr1 =
+ ANDI | ((type & kImm12Mask) << kRkShift) | (zero_reg.code() << kRjShift);
+
+ return instr == instr1;
+}
+
+static inline int32_t GetOffsetOfBranch(Instr instr,
+ Assembler::OffsetSize bits) {
+ int32_t result = 0;
+ if (bits == 16) {
+ result = (instr << 6) >> 16;
+ } else if (bits == 21) {
+ uint32_t low16 = instr << 6;
+ low16 = low16 >> 16;
+ low16 &= 0xffff;
+ int32_t hi5 = (instr << 27) >> 11;
+ result = hi5 | low16;
+ } else {
+ uint32_t low16 = instr << 6;
+ low16 = low16 >> 16;
+ low16 &= 0xffff;
+ int32_t hi10 = (instr << 22) >> 6;
+ result = hi10 | low16;
+ DCHECK_EQ(bits, 26);
+ }
+ return result << 2;
+}
+
+static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
+ if (Assembler::IsB(instr)) {
+ return Assembler::OffsetSize::kOffset26;
+ } else if (Assembler::IsBz(instr)) {
+ return Assembler::OffsetSize::kOffset21;
+ } else {
+ DCHECK(Assembler::IsBranch(instr));
+ return Assembler::OffsetSize::kOffset16;
+ }
+}
+
+static inline int32_t AddBranchOffset(int pos, Instr instr) {
+ Assembler::OffsetSize bits = OffsetSizeInBits(instr);
+
+ int32_t imm = GetOffsetOfBranch(instr, bits);
+
+ if (imm == kEndOfChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ // Handle the case where the next branch position is 0.
+ // TODO(LOONG_dev): Define -4 as a constant
+ int32_t offset = pos + imm;
+ return offset == 0 ? -4 : offset;
+ }
+}
+
+int Assembler::target_at(int pos, bool is_internal) {
+ if (is_internal) {
+ int64_t* p = reinterpret_cast<int64_t*>(buffer_start_ + pos);
+ int64_t address = *p;
+ if (address == kEndOfJumpChain) {
+ return kEndOfChain;
+ } else {
+ int64_t instr_address = reinterpret_cast<int64_t>(p);
+ DCHECK(instr_address - address < INT_MAX);
+ int delta = static_cast<int>(instr_address - address);
+ DCHECK(pos > delta);
+ return pos - delta;
+ }
+ }
+ Instr instr = instr_at(pos);
+
+ // TODO(LOONG_dev): Remove this once label_at_put is removed?
+ if ((instr & ~kImm16Mask) == 0) {
+ // Emitted label constant, not part of a branch.
+ if (instr == 0) {
+ return kEndOfChain;
+ } else {
+ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ return (imm18 + pos);
+ }
+ }
+
+ // Check we have a branch or jump instruction.
+ DCHECK(IsBranch(instr) || IsPcAddi(instr, t8, 16));
+ // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
+ // the compiler uses arithmetic shifts for signed integers.
+ if (IsBranch(instr)) {
+ return AddBranchOffset(pos, instr);
+ } else {
+ DCHECK(IsPcAddi(instr, t8, 16));
+ // See BranchLong(Label* L) and BranchAndLinkLong.
+ int32_t imm32;
+ Instr instr_lu12i_w = instr_at(pos + 1 * kInstrSize);
+ Instr instr_ori = instr_at(pos + 2 * kInstrSize);
+ DCHECK(IsLu12i_w(instr_lu12i_w));
+ imm32 = ((instr_lu12i_w >> 5) & 0xfffff) << 12;
+ imm32 |= ((instr_ori >> 10) & static_cast<int32_t>(kImm12Mask));
+ if (imm32 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ }
+ return pos + imm32;
+ }
+}
+
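+// In every branch format the low 16 bits of the word-scaled offset occupy
+// instruction bits [25:10]; the 21- and 26-bit formats place the remaining
+// high bits in the low bits of the instruction.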
+static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ int32_t bits = OffsetSizeInBits(instr);
+ int32_t imm = target_pos - pos;
+ DCHECK_EQ(imm & 3, 0);
+ imm >>= 2;
+
+ DCHECK(is_intn(imm, bits));
+
+ if (bits == 16) {
+ const int32_t mask = ((1 << 16) - 1) << 10;
+ instr &= ~mask;
+ return instr | ((imm << 10) & mask);
+ } else if (bits == 21) {
+ const int32_t mask = 0x3fffc1f;
+ instr &= ~mask;
+ uint32_t low16 = (imm & kImm16Mask) << 10;
+ int32_t hi5 = (imm >> 16) & 0x1f;
+ return instr | low16 | hi5;
+ } else {
+ DCHECK_EQ(bits, 26);
+ const int32_t mask = 0x3ffffff;
+ instr &= ~mask;
+ uint32_t low16 = (imm & kImm16Mask) << 10;
+ int32_t hi10 = (imm >> 16) & 0x3ff;
+ return instr | low16 | hi10;
+ }
+}
+
+void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
+ if (is_internal) {
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
+ *reinterpret_cast<uint64_t*>(buffer_start_ + pos) = imm;
+ return;
+ }
+ Instr instr = instr_at(pos);
+ if ((instr & ~kImm16Mask) == 0) {
+ DCHECK(target_pos == kEndOfChain || target_pos >= 0);
+ // Emitted label constant, not part of a branch.
+ // Make label relative to Code pointer of generated Code object.
+ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ return;
+ }
+
+ DCHECK(IsBranch(instr));
+ instr = SetBranchOffset(pos, target_pos, instr);
+ instr_at_put(pos, instr);
+}
+
+void Assembler::print(const Label* L) {
+ if (L->is_unused()) {
+ PrintF("unused label\n");
+ } else if (L->is_bound()) {
+ PrintF("bound label to %d\n", L->pos());
+ } else if (L->is_linked()) {
+ Label l;
+ l.link_to(L->pos());
+ PrintF("unbound label");
+ while (l.is_linked()) {
+ PrintF("@ %d ", l.pos());
+ Instr instr = instr_at(l.pos());
+ if ((instr & ~kImm16Mask) == 0) {
+ PrintF("value\n");
+ } else {
+ PrintF("%d\n", instr);
+ }
+ next(&l, is_internal_reference(&l));
+ }
+ } else {
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+ }
+}
+
+void Assembler::bind_to(Label* L, int pos) {
+ DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
+ int trampoline_pos = kInvalidSlotPos;
+ bool is_internal = false;
+ if (L->is_linked() && !trampoline_emitted_) {
+ unbound_labels_count_--;
+ if (!is_internal_reference(L)) {
+ next_buffer_check_ += kTrampolineSlotsSize;
+ }
+ }
+
+ while (L->is_linked()) {
+ int fixup_pos = L->pos();
+ int dist = pos - fixup_pos;
+ is_internal = is_internal_reference(L);
+ next(L, is_internal); // Call next before overwriting link with target at
+ // fixup_pos.
+ Instr instr = instr_at(fixup_pos);
+ if (is_internal) {
+ target_at_put(fixup_pos, pos, is_internal);
+ } else {
+ if (IsBranch(instr)) {
+ int branch_offset = BranchOffset(instr);
+ if (dist > branch_offset) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry(fixup_pos);
+ CHECK_NE(trampoline_pos, kInvalidSlotPos);
+ }
+ CHECK((trampoline_pos - fixup_pos) <= branch_offset);
+ target_at_put(fixup_pos, trampoline_pos, false);
+ fixup_pos = trampoline_pos;
+ }
+ target_at_put(fixup_pos, pos, false);
+ } else {
+ DCHECK(IsJ(instr) || IsLu12i_w(instr) || IsEmittedConstant(instr) ||
+ IsPcAddi(instr, t8, 8));
+ target_at_put(fixup_pos, pos, false);
+ }
+ }
+ }
+ L->bind_to(pos);
+
+ // Keep track of the last bound label so we don't eliminate any instructions
+ // before a bound label.
+ if (pos > last_bound_pos_) last_bound_pos_ = pos;
+}
+
+void Assembler::bind(Label* L) {
+ DCHECK(!L->is_bound()); // Label can only be bound once.
+ bind_to(L, pc_offset());
+}
+
+void Assembler::next(Label* L, bool is_internal) {
+ DCHECK(L->is_linked());
+ int link = target_at(L->pos(), is_internal);
+ if (link == kEndOfChain) {
+ L->Unuse();
+ } else if (link == -4) {
+ // Next position is pc_offset == 0
+ L->link_to(0);
+ } else {
+ DCHECK_GE(link, 0);
+ L->link_to(link);
+ }
+}
+
+bool Assembler::is_near_c(Label* L) {
+ DCHECK(L->is_bound());
+ return pc_offset() - L->pos() < kMax16BranchOffset - 4 * kInstrSize;
+}
+
+bool Assembler::is_near(Label* L, OffsetSize bits) {
+ DCHECK(L->is_bound());
+ return ((pc_offset() - L->pos()) <
+ (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
+}
+
+bool Assembler::is_near_a(Label* L) {
+ DCHECK(L->is_bound());
+ return pc_offset() - L->pos() <= kMax26BranchOffset - 4 * kInstrSize;
+}
+
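+// Returns the maximum positive byte offset reachable by the given branch
+// instruction, e.g. (1 << 17) - 1 bytes for the 16-bit offset formats.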
+int Assembler::BranchOffset(Instr instr) {
+ int bits = OffsetSize::kOffset16;
+
+ uint32_t opcode = (instr >> 26) << 26;
+ switch (opcode) {
+ case B:
+ case BL:
+ bits = OffsetSize::kOffset26;
+ break;
+ case BNEZ:
+ case BEQZ:
+ case BCZ:
+ bits = OffsetSize::kOffset21;
+ break;
+ case BNE:
+ case BEQ:
+ case BLT:
+ case BGE:
+ case BLTU:
+ case BGEU:
+ case JIRL:
+ bits = OffsetSize::kOffset16;
+ break;
+ default:
+ break;
+ }
+
+ return (1 << (bits + 2 - 1)) - 1;
+}
+
+// We have to use a temporary register for things that can be relocated, even
+// if they could otherwise be encoded in an instruction's immediate-offset
+// field: there is no guarantee that the relocated location can be similarly
+// encoded.
+bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
+ return !RelocInfo::IsNone(rmode);
+}
+
+void Assembler::GenB(Opcode opcode, Register rj, int32_t si21) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK((BEQZ == opcode || BNEZ == opcode) && is_int21(si21) && rj.is_valid());
+ Instr instr = opcode | (si21 & kImm16Mask) << kRkShift |
+ (rj.code() << kRjShift) | ((si21 & 0x1fffff) >> 16);
+ emit(instr);
+}
+
+void Assembler::GenB(Opcode opcode, CFRegister cj, int32_t si21, bool isEq) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(BCZ == opcode && is_int21(si21));
+ DCHECK(cj >= 0 && cj <= 7);
+ int32_t sc = (isEq ? cj : cj + 8);
+ Instr instr = opcode | (si21 & kImm16Mask) << kRkShift | (sc << kRjShift) |
+ ((si21 & 0x1fffff) >> 16);
+ emit(instr);
+}
+
+void Assembler::GenB(Opcode opcode, int32_t si26) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK((B == opcode || BL == opcode) && is_int26(si26));
+ Instr instr =
+ opcode | ((si26 & kImm16Mask) << kRkShift) | ((si26 & kImm26Mask) >> 16);
+ emit(instr);
+}
+
+void Assembler::GenBJ(Opcode opcode, Register rj, Register rd, int32_t si16) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(is_int16(si16));
+ Instr instr = opcode | ((si16 & kImm16Mask) << kRkShift) |
+ (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenCmp(Opcode opcode, FPUCondition cond, FPURegister fk,
+ FPURegister fj, CFRegister cd) {
+ DCHECK(opcode == FCMP_COND_S || opcode == FCMP_COND_D);
+ Instr instr = opcode | cond << kCondShift | (fk.code() << kFkShift) |
+ (fj.code() << kFjShift) | cd;
+ emit(instr);
+}
+
+void Assembler::GenSel(Opcode opcode, CFRegister ca, FPURegister fk,
+ FPURegister fj, FPURegister rd) {
+ DCHECK((opcode == FSEL));
+ Instr instr = opcode | ca << kCondShift | (fk.code() << kFkShift) |
+ (fj.code() << kFjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rj, Register rd,
+ bool rjrd) {
+ DCHECK(rjrd);
+ Instr instr = 0;
+ instr = opcode | (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fj, FPURegister fd) {
+ Instr instr = opcode | (fj.code() << kFjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rj, FPURegister fd) {
+ DCHECK((opcode == MOVGR2FR_W) || (opcode == MOVGR2FR_D) ||
+ (opcode == MOVGR2FRH_W));
+ Instr instr = opcode | (rj.code() << kRjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fj, Register rd) {
+ DCHECK((opcode == MOVFR2GR_S) || (opcode == MOVFR2GR_D) ||
+ (opcode == MOVFRH2GR_S));
+ Instr instr = opcode | (fj.code() << kFjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rj, FPUControlRegister fd) {
+ DCHECK((opcode == MOVGR2FCSR));
+ Instr instr = opcode | (rj.code() << kRjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPUControlRegister fj, Register rd) {
+ DCHECK((opcode == MOVFCSR2GR));
+ Instr instr = opcode | (fj.code() << kFjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fj, CFRegister cd) {
+ DCHECK((opcode == MOVFR2CF));
+ Instr instr = opcode | (fj.code() << kFjShift) | cd;
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, CFRegister cj, FPURegister fd) {
+ DCHECK((opcode == MOVCF2FR));
+ Instr instr = opcode | cj << kFjShift | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rj, CFRegister cd) {
+ DCHECK((opcode == MOVGR2CF));
+ Instr instr = opcode | (rj.code() << kRjShift) | cd;
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, CFRegister cj, Register rd) {
+ DCHECK((opcode == MOVCF2GR));
+ Instr instr = opcode | cj << kFjShift | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rk, Register rj,
+ Register rd) {
+ Instr instr =
+ opcode | (rk.code() << kRkShift) | (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fk, FPURegister fj,
+ FPURegister fd) {
+ Instr instr =
+ opcode | (fk.code() << kFkShift) | (fj.code() << kFjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fa, FPURegister fk,
+ FPURegister fj, FPURegister fd) {
+ Instr instr = opcode | (fa.code() << kFaShift) | (fk.code() << kFkShift) |
+ (fj.code() << kFjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rk, Register rj,
+ FPURegister fd) {
+ Instr instr =
+ opcode | (rk.code() << kRkShift) | (rj.code() << kRjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t bit3, Register rk, Register rj,
+ Register rd) {
+ DCHECK(is_uint3(bit3));
+ Instr instr = opcode | (bit3 & 0x7) << kSaShift | (rk.code() << kRkShift) |
+ (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t bit6m, int32_t bit6l, Register rj,
+ Register rd) {
+ DCHECK(is_uint6(bit6m) && is_uint6(bit6l));
+ Instr instr = opcode | (bit6m & 0x3f) << 16 | (bit6l & 0x3f) << kRkShift |
+ (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t bit20, Register rd) {
+ // DCHECK(is_uint20(bit20) || is_int20(bit20));
+ Instr instr = opcode | (bit20 & 0xfffff) << kRjShift | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t bit15) {
+ DCHECK(is_uint15(bit15));
+ Instr instr = opcode | (bit15 & 0x7fff);
+ emit(instr);
+}
+
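+// Generic immediate encoder: value_bits selects how many low bits of value
+// are placed in the instruction's immediate field (shifted to kRkShift).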
+void Assembler::GenImm(Opcode opcode, int32_t value, Register rj, Register rd,
+ int32_t value_bits) {
+ DCHECK(value_bits == 6 || value_bits == 12 || value_bits == 14 ||
+ value_bits == 16);
+ uint32_t imm = value & 0x3f;
+ if (value_bits == 12) {
+ imm = value & kImm12Mask;
+ } else if (value_bits == 14) {
+ imm = value & 0x3fff;
+ } else if (value_bits == 16) {
+ imm = value & kImm16Mask;
+ }
+ Instr instr = opcode | imm << kRkShift | (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t bit12, Register rj,
+ FPURegister fd) {
+ DCHECK(is_int12(bit12));
+ Instr instr = opcode | ((bit12 & kImm12Mask) << kRkShift) |
+ (rj.code() << kRjShift) | fd.code();
+ emit(instr);
+}
+
+// Returns the next free trampoline entry.
+int32_t Assembler::get_trampoline_entry(int32_t pos) {
+ int32_t trampoline_entry = kInvalidSlotPos;
+ if (!internal_trampoline_exception_) {
+ if (trampoline_.start() > pos) {
+ trampoline_entry = trampoline_.take_slot();
+ }
+
+ if (kInvalidSlotPos == trampoline_entry) {
+ internal_trampoline_exception_ = true;
+ }
+ }
+ return trampoline_entry;
+}
+
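+// For a bound label, jump_address() returns the absolute address of the
+// target inside the buffer, while branch_long_offset() below returns a
+// pc-relative byte offset; an unbound, unlinked label yields kEndOfJumpChain.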
+uint64_t Assembler::jump_address(Label* L) {
+ int64_t target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ return kEndOfJumpChain;
+ }
+ }
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
+ DCHECK_EQ(imm & 3, 0);
+
+ return imm;
+}
+
+uint64_t Assembler::branch_long_offset(Label* L) {
+ int64_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ return kEndOfJumpChain;
+ }
+ }
+ int64_t offset = target_pos - pc_offset();
+ DCHECK_EQ(offset & 3, 0);
+
+ return static_cast<uint64_t>(offset);
+}
+
+int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - pc_offset();
+ DCHECK(is_intn(offset, bits + 2));
+ DCHECK_EQ(offset & 3, 0);
+
+ return offset;
+}
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+ int target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ int32_t imm18 = target_pos - at_offset;
+ DCHECK_EQ(imm18 & 3, 0);
+ int32_t imm16 = imm18 >> 2;
+ DCHECK(is_int16(imm16));
+ instr_at_put(at_offset, (imm16 & kImm16Mask));
+ } else {
+ target_pos = kEndOfChain;
+ instr_at_put(at_offset, 0);
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ }
+ L->link_to(at_offset);
+ }
+}
+
+//------- Branch and jump instructions --------
+
+void Assembler::b(int32_t offset) { GenB(B, offset); }
+
+void Assembler::bl(int32_t offset) { GenB(BL, offset); }
+
+void Assembler::beq(Register rj, Register rd, int32_t offset) {
+ GenBJ(BEQ, rj, rd, offset);
+}
+
+void Assembler::bne(Register rj, Register rd, int32_t offset) {
+ GenBJ(BNE, rj, rd, offset);
+}
+
+void Assembler::blt(Register rj, Register rd, int32_t offset) {
+ GenBJ(BLT, rj, rd, offset);
+}
+
+void Assembler::bge(Register rj, Register rd, int32_t offset) {
+ GenBJ(BGE, rj, rd, offset);
+}
+
+void Assembler::bltu(Register rj, Register rd, int32_t offset) {
+ GenBJ(BLTU, rj, rd, offset);
+}
+
+void Assembler::bgeu(Register rj, Register rd, int32_t offset) {
+ GenBJ(BGEU, rj, rd, offset);
+}
+
+void Assembler::beqz(Register rj, int32_t offset) { GenB(BEQZ, rj, offset); }
+void Assembler::bnez(Register rj, int32_t offset) { GenB(BNEZ, rj, offset); }
+
+void Assembler::jirl(Register rd, Register rj, int32_t offset) {
+ GenBJ(JIRL, rj, rd, offset);
+}
+
+void Assembler::bceqz(CFRegister cj, int32_t si21) {
+ GenB(BCZ, cj, si21, true);
+}
+
+void Assembler::bcnez(CFRegister cj, int32_t si21) {
+ GenB(BCZ, cj, si21, false);
+}
+
+// -------Data-processing-instructions---------
+
+// Arithmetic.
+void Assembler::add_w(Register rd, Register rj, Register rk) {
+ GenRegister(ADD_W, rk, rj, rd);
+}
+
+void Assembler::add_d(Register rd, Register rj, Register rk) {
+ GenRegister(ADD_D, rk, rj, rd);
+}
+
+void Assembler::sub_w(Register rd, Register rj, Register rk) {
+ GenRegister(SUB_W, rk, rj, rd);
+}
+
+void Assembler::sub_d(Register rd, Register rj, Register rk) {
+ GenRegister(SUB_D, rk, rj, rd);
+}
+
+void Assembler::addi_w(Register rd, Register rj, int32_t si12) {
+ GenImm(ADDI_W, si12, rj, rd, 12);
+}
+
+void Assembler::addi_d(Register rd, Register rj, int32_t si12) {
+ GenImm(ADDI_D, si12, rj, rd, 12);
+}
+
+void Assembler::addu16i_d(Register rd, Register rj, int32_t si16) {
+ GenImm(ADDU16I_D, si16, rj, rd, 16);
+}
+
+void Assembler::alsl_w(Register rd, Register rj, Register rk, int32_t sa2) {
+ DCHECK(is_uint2(sa2 - 1));
+ GenImm(ALSL_W, sa2 - 1, rk, rj, rd);
+}
+
+void Assembler::alsl_wu(Register rd, Register rj, Register rk, int32_t sa2) {
+ DCHECK(is_uint2(sa2 - 1));
+ GenImm(ALSL_WU, sa2 + 3, rk, rj, rd);
+}
+
+void Assembler::alsl_d(Register rd, Register rj, Register rk, int32_t sa2) {
+ DCHECK(is_uint2(sa2 - 1));
+ GenImm(ALSL_D, sa2 - 1, rk, rj, rd);
+}
+
+void Assembler::lu12i_w(Register rd, int32_t si20) {
+ GenImm(LU12I_W, si20, rd);
+}
+
+void Assembler::lu32i_d(Register rd, int32_t si20) {
+ GenImm(LU32I_D, si20, rd);
+}
+
+void Assembler::lu52i_d(Register rd, Register rj, int32_t si12) {
+ GenImm(LU52I_D, si12, rj, rd, 12);
+}
+
+void Assembler::slt(Register rd, Register rj, Register rk) {
+ GenRegister(SLT, rk, rj, rd);
+}
+
+void Assembler::sltu(Register rd, Register rj, Register rk) {
+ GenRegister(SLTU, rk, rj, rd);
+}
+
+void Assembler::slti(Register rd, Register rj, int32_t si12) {
+ GenImm(SLTI, si12, rj, rd, 12);
+}
+
+void Assembler::sltui(Register rd, Register rj, int32_t si12) {
+ GenImm(SLTUI, si12, rj, rd, 12);
+}
+
+void Assembler::pcaddi(Register rd, int32_t si20) { GenImm(PCADDI, si20, rd); }
+
+void Assembler::pcaddu12i(Register rd, int32_t si20) {
+ GenImm(PCADDU12I, si20, rd);
+}
+
+void Assembler::pcaddu18i(Register rd, int32_t si20) {
+ GenImm(PCADDU18I, si20, rd);
+}
+
+void Assembler::pcalau12i(Register rd, int32_t si20) {
+ GenImm(PCALAU12I, si20, rd);
+}
+
+void Assembler::and_(Register rd, Register rj, Register rk) {
+ GenRegister(AND, rk, rj, rd);
+}
+
+void Assembler::or_(Register rd, Register rj, Register rk) {
+ GenRegister(OR, rk, rj, rd);
+}
+
+void Assembler::xor_(Register rd, Register rj, Register rk) {
+ GenRegister(XOR, rk, rj, rd);
+}
+
+void Assembler::nor(Register rd, Register rj, Register rk) {
+ GenRegister(NOR, rk, rj, rd);
+}
+
+void Assembler::andn(Register rd, Register rj, Register rk) {
+ GenRegister(ANDN, rk, rj, rd);
+}
+
+void Assembler::orn(Register rd, Register rj, Register rk) {
+ GenRegister(ORN, rk, rj, rd);
+}
+
+void Assembler::andi(Register rd, Register rj, int32_t ui12) {
+ GenImm(ANDI, ui12, rj, rd, 12);
+}
+
+void Assembler::ori(Register rd, Register rj, int32_t ui12) {
+ GenImm(ORI, ui12, rj, rd, 12);
+}
+
+void Assembler::xori(Register rd, Register rj, int32_t ui12) {
+ GenImm(XORI, ui12, rj, rd, 12);
+}
+
+void Assembler::mul_w(Register rd, Register rj, Register rk) {
+ GenRegister(MUL_W, rk, rj, rd);
+}
+
+void Assembler::mulh_w(Register rd, Register rj, Register rk) {
+ GenRegister(MULH_W, rk, rj, rd);
+}
+
+void Assembler::mulh_wu(Register rd, Register rj, Register rk) {
+ GenRegister(MULH_WU, rk, rj, rd);
+}
+
+void Assembler::mul_d(Register rd, Register rj, Register rk) {
+ GenRegister(MUL_D, rk, rj, rd);
+}
+
+void Assembler::mulh_d(Register rd, Register rj, Register rk) {
+ GenRegister(MULH_D, rk, rj, rd);
+}
+
+void Assembler::mulh_du(Register rd, Register rj, Register rk) {
+ GenRegister(MULH_DU, rk, rj, rd);
+}
+
+void Assembler::mulw_d_w(Register rd, Register rj, Register rk) {
+ GenRegister(MULW_D_W, rk, rj, rd);
+}
+
+void Assembler::mulw_d_wu(Register rd, Register rj, Register rk) {
+ GenRegister(MULW_D_WU, rk, rj, rd);
+}
+
+void Assembler::div_w(Register rd, Register rj, Register rk) {
+ GenRegister(DIV_W, rk, rj, rd);
+}
+
+void Assembler::mod_w(Register rd, Register rj, Register rk) {
+ GenRegister(MOD_W, rk, rj, rd);
+}
+
+void Assembler::div_wu(Register rd, Register rj, Register rk) {
+ GenRegister(DIV_WU, rk, rj, rd);
+}
+
+void Assembler::mod_wu(Register rd, Register rj, Register rk) {
+ GenRegister(MOD_WU, rk, rj, rd);
+}
+
+void Assembler::div_d(Register rd, Register rj, Register rk) {
+ GenRegister(DIV_D, rk, rj, rd);
+}
+
+void Assembler::mod_d(Register rd, Register rj, Register rk) {
+ GenRegister(MOD_D, rk, rj, rd);
+}
+
+void Assembler::div_du(Register rd, Register rj, Register rk) {
+ GenRegister(DIV_DU, rk, rj, rd);
+}
+
+void Assembler::mod_du(Register rd, Register rj, Register rk) {
+ GenRegister(MOD_DU, rk, rj, rd);
+}
+
+// Shifts.
+void Assembler::sll_w(Register rd, Register rj, Register rk) {
+ GenRegister(SLL_W, rk, rj, rd);
+}
+
+void Assembler::srl_w(Register rd, Register rj, Register rk) {
+ GenRegister(SRL_W, rk, rj, rd);
+}
+
+void Assembler::sra_w(Register rd, Register rj, Register rk) {
+ GenRegister(SRA_W, rk, rj, rd);
+}
+
+void Assembler::rotr_w(Register rd, Register rj, Register rk) {
+ GenRegister(ROTR_W, rk, rj, rd);
+}
+
+void Assembler::slli_w(Register rd, Register rj, int32_t ui5) {
+ DCHECK(is_uint5(ui5));
+ GenImm(SLLI_W, ui5 + 0x20, rj, rd, 6);
+}
+
+void Assembler::srli_w(Register rd, Register rj, int32_t ui5) {
+ DCHECK(is_uint5(ui5));
+ GenImm(SRLI_W, ui5 + 0x20, rj, rd, 6);
+}
+
+void Assembler::srai_w(Register rd, Register rj, int32_t ui5) {
+ DCHECK(is_uint5(ui5));
+ GenImm(SRAI_W, ui5 + 0x20, rj, rd, 6);
+}
+
+void Assembler::rotri_w(Register rd, Register rj, int32_t ui5) {
+ DCHECK(is_uint5(ui5));
+ GenImm(ROTRI_W, ui5 + 0x20, rj, rd, 6);
+}
+
+void Assembler::sll_d(Register rd, Register rj, Register rk) {
+ GenRegister(SLL_D, rk, rj, rd);
+}
+
+void Assembler::srl_d(Register rd, Register rj, Register rk) {
+ GenRegister(SRL_D, rk, rj, rd);
+}
+
+void Assembler::sra_d(Register rd, Register rj, Register rk) {
+ GenRegister(SRA_D, rk, rj, rd);
+}
+
+void Assembler::rotr_d(Register rd, Register rj, Register rk) {
+ GenRegister(ROTR_D, rk, rj, rd);
+}
+
+void Assembler::slli_d(Register rd, Register rj, int32_t ui6) {
+ GenImm(SLLI_D, ui6, rj, rd, 6);
+}
+
+void Assembler::srli_d(Register rd, Register rj, int32_t ui6) {
+ GenImm(SRLI_D, ui6, rj, rd, 6);
+}
+
+void Assembler::srai_d(Register rd, Register rj, int32_t ui6) {
+ GenImm(SRAI_D, ui6, rj, rd, 6);
+}
+
+void Assembler::rotri_d(Register rd, Register rj, int32_t ui6) {
+ GenImm(ROTRI_D, ui6, rj, rd, 6);
+}
+
+// Bit twiddling.
+void Assembler::ext_w_b(Register rd, Register rj) {
+ GenRegister(EXT_W_B, rj, rd);
+}
+
+void Assembler::ext_w_h(Register rd, Register rj) {
+ GenRegister(EXT_W_H, rj, rd);
+}
+
+void Assembler::clo_w(Register rd, Register rj) { GenRegister(CLO_W, rj, rd); }
+
+void Assembler::clz_w(Register rd, Register rj) { GenRegister(CLZ_W, rj, rd); }
+
+void Assembler::cto_w(Register rd, Register rj) { GenRegister(CTO_W, rj, rd); }
+
+void Assembler::ctz_w(Register rd, Register rj) { GenRegister(CTZ_W, rj, rd); }
+
+void Assembler::clo_d(Register rd, Register rj) { GenRegister(CLO_D, rj, rd); }
+
+void Assembler::clz_d(Register rd, Register rj) { GenRegister(CLZ_D, rj, rd); }
+
+void Assembler::cto_d(Register rd, Register rj) { GenRegister(CTO_D, rj, rd); }
+
+void Assembler::ctz_d(Register rd, Register rj) { GenRegister(CTZ_D, rj, rd); }
+
+void Assembler::bytepick_w(Register rd, Register rj, Register rk, int32_t sa2) {
+ DCHECK(is_uint2(sa2));
+ GenImm(BYTEPICK_W, sa2, rk, rj, rd);
+}
+
+void Assembler::bytepick_d(Register rd, Register rj, Register rk, int32_t sa3) {
+ GenImm(BYTEPICK_D, sa3, rk, rj, rd);
+}
+
+void Assembler::revb_2h(Register rd, Register rj) {
+ GenRegister(REVB_2H, rj, rd);
+}
+
+void Assembler::revb_4h(Register rd, Register rj) {
+ GenRegister(REVB_4H, rj, rd);
+}
+
+void Assembler::revb_2w(Register rd, Register rj) {
+ GenRegister(REVB_2W, rj, rd);
+}
+
+void Assembler::revb_d(Register rd, Register rj) {
+ GenRegister(REVB_D, rj, rd);
+}
+
+void Assembler::revh_2w(Register rd, Register rj) {
+ GenRegister(REVH_2W, rj, rd);
+}
+
+void Assembler::revh_d(Register rd, Register rj) {
+ GenRegister(REVH_D, rj, rd);
+}
+
+void Assembler::bitrev_4b(Register rd, Register rj) {
+ GenRegister(BITREV_4B, rj, rd);
+}
+
+void Assembler::bitrev_8b(Register rd, Register rj) {
+ GenRegister(BITREV_8B, rj, rd);
+}
+
+void Assembler::bitrev_w(Register rd, Register rj) {
+ GenRegister(BITREV_W, rj, rd);
+}
+
+void Assembler::bitrev_d(Register rd, Register rj) {
+ GenRegister(BITREV_D, rj, rd);
+}
+
+void Assembler::bstrins_w(Register rd, Register rj, int32_t msbw,
+ int32_t lsbw) {
+ DCHECK(is_uint5(msbw) && is_uint5(lsbw));
+ GenImm(BSTR_W, msbw + 0x20, lsbw, rj, rd);
+}
+
+void Assembler::bstrins_d(Register rd, Register rj, int32_t msbd,
+ int32_t lsbd) {
+ GenImm(BSTRINS_D, msbd, lsbd, rj, rd);
+}
+
+void Assembler::bstrpick_w(Register rd, Register rj, int32_t msbw,
+ int32_t lsbw) {
+ DCHECK(is_uint5(msbw) && is_uint5(lsbw));
+ GenImm(BSTR_W, msbw + 0x20, lsbw + 0x20, rj, rd);
+}
+
+void Assembler::bstrpick_d(Register rd, Register rj, int32_t msbd,
+ int32_t lsbd) {
+ GenImm(BSTRPICK_D, msbd, lsbd, rj, rd);
+}
+
+void Assembler::maskeqz(Register rd, Register rj, Register rk) {
+ GenRegister(MASKEQZ, rk, rj, rd);
+}
+
+void Assembler::masknez(Register rd, Register rj, Register rk) {
+ GenRegister(MASKNEZ, rk, rj, rd);
+}
+
+// Memory-instructions
+void Assembler::ld_b(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_B, si12, rj, rd, 12);
+}
+
+void Assembler::ld_h(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_H, si12, rj, rd, 12);
+}
+
+void Assembler::ld_w(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_W, si12, rj, rd, 12);
+}
+
+void Assembler::ld_d(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_D, si12, rj, rd, 12);
+}
+
+void Assembler::ld_bu(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_BU, si12, rj, rd, 12);
+}
+
+void Assembler::ld_hu(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_HU, si12, rj, rd, 12);
+}
+
+void Assembler::ld_wu(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_WU, si12, rj, rd, 12);
+}
+
+void Assembler::st_b(Register rd, Register rj, int32_t si12) {
+ GenImm(ST_B, si12, rj, rd, 12);
+}
+
+void Assembler::st_h(Register rd, Register rj, int32_t si12) {
+ GenImm(ST_H, si12, rj, rd, 12);
+}
+
+void Assembler::st_w(Register rd, Register rj, int32_t si12) {
+ GenImm(ST_W, si12, rj, rd, 12);
+}
+
+void Assembler::st_d(Register rd, Register rj, int32_t si12) {
+ GenImm(ST_D, si12, rj, rd, 12);
+}
+
+void Assembler::ldx_b(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_B, rk, rj, rd);
+}
+
+void Assembler::ldx_h(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_H, rk, rj, rd);
+}
+
+void Assembler::ldx_w(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_W, rk, rj, rd);
+}
+
+void Assembler::ldx_d(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_D, rk, rj, rd);
+}
+
+void Assembler::ldx_bu(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_BU, rk, rj, rd);
+}
+
+void Assembler::ldx_hu(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_HU, rk, rj, rd);
+}
+
+void Assembler::ldx_wu(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_WU, rk, rj, rd);
+}
+
+void Assembler::stx_b(Register rd, Register rj, Register rk) {
+ GenRegister(STX_B, rk, rj, rd);
+}
+
+void Assembler::stx_h(Register rd, Register rj, Register rk) {
+ GenRegister(STX_H, rk, rj, rd);
+}
+
+void Assembler::stx_w(Register rd, Register rj, Register rk) {
+ GenRegister(STX_W, rk, rj, rd);
+}
+
+void Assembler::stx_d(Register rd, Register rj, Register rk) {
+ GenRegister(STX_D, rk, rj, rd);
+}
+
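+// The ldptr/stptr (and the later ll/sc) instructions encode a 14-bit word
+// offset: the byte offset must be 4-byte aligned and is shifted right by 2
+// before encoding.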
+void Assembler::ldptr_w(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(LDPTR_W, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::ldptr_d(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(LDPTR_D, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::stptr_w(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(STPTR_W, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::stptr_d(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(STPTR_D, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::amswap_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMSWAP_W, rk, rj, rd);
+}
+
+void Assembler::amswap_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMSWAP_D, rk, rj, rd);
+}
+
+void Assembler::amadd_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMADD_W, rk, rj, rd);
+}
+
+void Assembler::amadd_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMADD_D, rk, rj, rd);
+}
+
+void Assembler::amand_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMAND_W, rk, rj, rd);
+}
+
+void Assembler::amand_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMAND_D, rk, rj, rd);
+}
+
+void Assembler::amor_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMOR_W, rk, rj, rd);
+}
+
+void Assembler::amor_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMOR_D, rk, rj, rd);
+}
+
+void Assembler::amxor_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMXOR_W, rk, rj, rd);
+}
+
+void Assembler::amxor_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMXOR_D, rk, rj, rd);
+}
+
+void Assembler::ammax_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_W, rk, rj, rd);
+}
+
+void Assembler::ammax_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_D, rk, rj, rd);
+}
+
+void Assembler::ammin_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_W, rk, rj, rd);
+}
+
+void Assembler::ammin_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_D, rk, rj, rd);
+}
+
+void Assembler::ammax_wu(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_WU, rk, rj, rd);
+}
+
+void Assembler::ammax_du(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DU, rk, rj, rd);
+}
+
+void Assembler::ammin_wu(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_WU, rk, rj, rd);
+}
+
+void Assembler::ammin_du(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DU, rk, rj, rd);
+}
+
+void Assembler::amswap_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMSWAP_DB_W, rk, rj, rd);
+}
+
+void Assembler::amswap_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMSWAP_DB_D, rk, rj, rd);
+}
+
+void Assembler::amadd_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMADD_DB_W, rk, rj, rd);
+}
+
+void Assembler::amadd_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMADD_DB_D, rk, rj, rd);
+}
+
+void Assembler::amand_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMAND_DB_W, rk, rj, rd);
+}
+
+void Assembler::amand_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMAND_DB_D, rk, rj, rd);
+}
+
+void Assembler::amor_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMOR_DB_W, rk, rj, rd);
+}
+
+void Assembler::amor_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMOR_DB_D, rk, rj, rd);
+}
+
+void Assembler::amxor_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMXOR_DB_W, rk, rj, rd);
+}
+
+void Assembler::amxor_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMXOR_DB_D, rk, rj, rd);
+}
+
+void Assembler::ammax_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DB_W, rk, rj, rd);
+}
+
+void Assembler::ammax_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DB_D, rk, rj, rd);
+}
+
+void Assembler::ammin_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DB_W, rk, rj, rd);
+}
+
+void Assembler::ammin_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DB_D, rk, rj, rd);
+}
+
+void Assembler::ammax_db_wu(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DB_WU, rk, rj, rd);
+}
+
+void Assembler::ammax_db_du(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DB_DU, rk, rj, rd);
+}
+
+void Assembler::ammin_db_wu(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DB_WU, rk, rj, rd);
+}
+
+void Assembler::ammin_db_du(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DB_DU, rk, rj, rd);
+}
+
+void Assembler::ll_w(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(LL_W, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::ll_d(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(LL_D, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::sc_w(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(SC_W, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::sc_d(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(SC_D, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::dbar(int32_t hint) { GenImm(DBAR, hint); }
+
+void Assembler::ibar(int32_t hint) { GenImm(IBAR, hint); }
+
+// Break instruction.
+void Assembler::break_(uint32_t code, bool break_as_stop) {
+ DCHECK(
+ (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) ||
+ (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode)));
+ GenImm(BREAK, code);
+}
+
+void Assembler::stop(uint32_t code) {
+ DCHECK_GT(code, kMaxWatchpointCode);
+ DCHECK_LE(code, kMaxStopCode);
+#if defined(V8_HOST_ARCH_LOONG64)
+ break_(0x4321);
+#else // V8_HOST_ARCH_LOONG64
+ break_(code, true);
+#endif
+}
+
+void Assembler::fadd_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FADD_S, fk, fj, fd);
+}
+
+void Assembler::fadd_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FADD_D, fk, fj, fd);
+}
+
+void Assembler::fsub_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FSUB_S, fk, fj, fd);
+}
+
+void Assembler::fsub_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FSUB_D, fk, fj, fd);
+}
+
+void Assembler::fmul_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMUL_S, fk, fj, fd);
+}
+
+void Assembler::fmul_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMUL_D, fk, fj, fd);
+}
+
+void Assembler::fdiv_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FDIV_S, fk, fj, fd);
+}
+
+void Assembler::fdiv_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FDIV_D, fk, fj, fd);
+}
+
+void Assembler::fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FMADD_S, fa, fk, fj, fd);
+}
+
+void Assembler::fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FMADD_D, fa, fk, fj, fd);
+}
+
+void Assembler::fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FMSUB_S, fa, fk, fj, fd);
+}
+
+void Assembler::fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FMSUB_D, fa, fk, fj, fd);
+}
+
+void Assembler::fnmadd_s(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FNMADD_S, fa, fk, fj, fd);
+}
+
+void Assembler::fnmadd_d(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FNMADD_D, fa, fk, fj, fd);
+}
+
+void Assembler::fnmsub_s(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FNMSUB_S, fa, fk, fj, fd);
+}
+
+void Assembler::fnmsub_d(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FNMSUB_D, fa, fk, fj, fd);
+}
+
+void Assembler::fmax_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMAX_S, fk, fj, fd);
+}
+
+void Assembler::fmax_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMAX_D, fk, fj, fd);
+}
+
+void Assembler::fmin_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMIN_S, fk, fj, fd);
+}
+
+void Assembler::fmin_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMIN_D, fk, fj, fd);
+}
+
+void Assembler::fmaxa_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMAXA_S, fk, fj, fd);
+}
+
+void Assembler::fmaxa_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMAXA_D, fk, fj, fd);
+}
+
+void Assembler::fmina_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMINA_S, fk, fj, fd);
+}
+
+void Assembler::fmina_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMINA_D, fk, fj, fd);
+}
+
+void Assembler::fabs_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FABS_S, fj, fd);
+}
+
+void Assembler::fabs_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FABS_D, fj, fd);
+}
+
+void Assembler::fneg_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FNEG_S, fj, fd);
+}
+
+void Assembler::fneg_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FNEG_D, fj, fd);
+}
+
+void Assembler::fsqrt_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FSQRT_S, fj, fd);
+}
+
+void Assembler::fsqrt_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FSQRT_D, fj, fd);
+}
+
+void Assembler::frecip_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FRECIP_S, fj, fd);
+}
+
+void Assembler::frecip_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FRECIP_D, fj, fd);
+}
+
+void Assembler::frsqrt_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FRSQRT_S, fj, fd);
+}
+
+void Assembler::frsqrt_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FRSQRT_D, fj, fd);
+}
+
+void Assembler::fscaleb_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FSCALEB_S, fk, fj, fd);
+}
+
+void Assembler::fscaleb_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FSCALEB_D, fk, fj, fd);
+}
+
+void Assembler::flogb_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FLOGB_S, fj, fd);
+}
+
+void Assembler::flogb_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FLOGB_D, fj, fd);
+}
+
+void Assembler::fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FCOPYSIGN_S, fk, fj, fd);
+}
+
+void Assembler::fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FCOPYSIGN_D, fk, fj, fd);
+}
+
+void Assembler::fclass_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FCLASS_S, fj, fd);
+}
+
+void Assembler::fclass_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FCLASS_D, fj, fd);
+}
+
+void Assembler::fcmp_cond_s(FPUCondition cc, FPURegister fj, FPURegister fk,
+ CFRegister cd) {
+ GenCmp(FCMP_COND_S, cc, fk, fj, cd);
+}
+
+void Assembler::fcmp_cond_d(FPUCondition cc, FPURegister fj, FPURegister fk,
+ CFRegister cd) {
+ GenCmp(FCMP_COND_D, cc, fk, fj, cd);
+}
+
+void Assembler::fcvt_s_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FCVT_S_D, fj, fd);
+}
+
+void Assembler::fcvt_d_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FCVT_D_S, fj, fd);
+}
+
+void Assembler::ffint_s_w(FPURegister fd, FPURegister fj) {
+ GenRegister(FFINT_S_W, fj, fd);
+}
+
+void Assembler::ffint_s_l(FPURegister fd, FPURegister fj) {
+ GenRegister(FFINT_S_L, fj, fd);
+}
+
+void Assembler::ffint_d_w(FPURegister fd, FPURegister fj) {
+ GenRegister(FFINT_D_W, fj, fd);
+}
+
+void Assembler::ffint_d_l(FPURegister fd, FPURegister fj) {
+ GenRegister(FFINT_D_L, fj, fd);
+}
+
+void Assembler::ftint_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINT_W_S, fj, fd);
+}
+
+void Assembler::ftint_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINT_W_D, fj, fd);
+}
+
+void Assembler::ftint_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINT_L_S, fj, fd);
+}
+
+void Assembler::ftint_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINT_L_D, fj, fd);
+}
+
+void Assembler::ftintrm_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRM_W_S, fj, fd);
+}
+
+void Assembler::ftintrm_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRM_W_D, fj, fd);
+}
+
+void Assembler::ftintrm_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRM_L_S, fj, fd);
+}
+
+void Assembler::ftintrm_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRM_L_D, fj, fd);
+}
+
+void Assembler::ftintrp_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRP_W_S, fj, fd);
+}
+
+void Assembler::ftintrp_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRP_W_D, fj, fd);
+}
+
+void Assembler::ftintrp_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRP_L_S, fj, fd);
+}
+
+void Assembler::ftintrp_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRP_L_D, fj, fd);
+}
+
+void Assembler::ftintrz_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRZ_W_S, fj, fd);
+}
+
+void Assembler::ftintrz_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRZ_W_D, fj, fd);
+}
+
+void Assembler::ftintrz_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRZ_L_S, fj, fd);
+}
+
+void Assembler::ftintrz_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRZ_L_D, fj, fd);
+}
+
+void Assembler::ftintrne_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRNE_W_S, fj, fd);
+}
+
+void Assembler::ftintrne_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRNE_W_D, fj, fd);
+}
+
+void Assembler::ftintrne_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRNE_L_S, fj, fd);
+}
+
+void Assembler::ftintrne_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRNE_L_D, fj, fd);
+}
+
+void Assembler::frint_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FRINT_S, fj, fd);
+}
+
+void Assembler::frint_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FRINT_D, fj, fd);
+}
+
+void Assembler::fmov_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FMOV_S, fj, fd);
+}
+
+void Assembler::fmov_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FMOV_D, fj, fd);
+}
+
+void Assembler::fsel(CFRegister ca, FPURegister fd, FPURegister fj,
+ FPURegister fk) {
+ GenSel(FSEL, ca, fk, fj, fd);
+}
+
+void Assembler::movgr2fr_w(FPURegister fd, Register rj) {
+ GenRegister(MOVGR2FR_W, rj, fd);
+}
+
+void Assembler::movgr2fr_d(FPURegister fd, Register rj) {
+ GenRegister(MOVGR2FR_D, rj, fd);
+}
+
+void Assembler::movgr2frh_w(FPURegister fd, Register rj) {
+ GenRegister(MOVGR2FRH_W, rj, fd);
+}
+
+void Assembler::movfr2gr_s(Register rd, FPURegister fj) {
+ GenRegister(MOVFR2GR_S, fj, rd);
+}
+
+void Assembler::movfr2gr_d(Register rd, FPURegister fj) {
+ GenRegister(MOVFR2GR_D, fj, rd);
+}
+
+void Assembler::movfrh2gr_s(Register rd, FPURegister fj) {
+ GenRegister(MOVFRH2GR_S, fj, rd);
+}
+
+void Assembler::movgr2fcsr(Register rj, FPUControlRegister fcsr) {
+ GenRegister(MOVGR2FCSR, rj, fcsr);
+}
+
+void Assembler::movfcsr2gr(Register rd, FPUControlRegister fcsr) {
+ GenRegister(MOVFCSR2GR, fcsr, rd);
+}
+
+void Assembler::movfr2cf(CFRegister cd, FPURegister fj) {
+ GenRegister(MOVFR2CF, fj, cd);
+}
+
+void Assembler::movcf2fr(FPURegister fd, CFRegister cj) {
+ GenRegister(MOVCF2FR, cj, fd);
+}
+
+void Assembler::movgr2cf(CFRegister cd, Register rj) {
+ GenRegister(MOVGR2CF, rj, cd);
+}
+
+void Assembler::movcf2gr(Register rd, CFRegister cj) {
+ GenRegister(MOVCF2GR, cj, rd);
+}
+
+void Assembler::fld_s(FPURegister fd, Register rj, int32_t si12) {
+ GenImm(FLD_S, si12, rj, fd);
+}
+
+void Assembler::fld_d(FPURegister fd, Register rj, int32_t si12) {
+ GenImm(FLD_D, si12, rj, fd);
+}
+
+void Assembler::fst_s(FPURegister fd, Register rj, int32_t si12) {
+ GenImm(FST_S, si12, rj, fd);
+}
+
+void Assembler::fst_d(FPURegister fd, Register rj, int32_t si12) {
+ GenImm(FST_D, si12, rj, fd);
+}
+
+void Assembler::fldx_s(FPURegister fd, Register rj, Register rk) {
+ GenRegister(FLDX_S, rk, rj, fd);
+}
+
+void Assembler::fldx_d(FPURegister fd, Register rj, Register rk) {
+ GenRegister(FLDX_D, rk, rj, fd);
+}
+
+void Assembler::fstx_s(FPURegister fd, Register rj, Register rk) {
+ GenRegister(FSTX_S, rk, rj, fd);
+}
+
+void Assembler::fstx_d(FPURegister fd, Register rj, Register rk) {
+ GenRegister(FSTX_D, rk, rj, fd);
+}
+
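+// If a base+offset MemOperand's offset does not fit in a signed 12-bit
+// immediate, materialize the offset in a scratch register and switch the
+// operand to its base+index form.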
+void Assembler::AdjustBaseAndOffset(MemOperand* src) {
+ // is_uint12 must be passed a signed value, hence the static_cast below.
+ if ((!src->hasIndexReg() && is_int12(src->offset())) || src->hasIndexReg()) {
+ return;
+ }
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (is_uint12(static_cast<int32_t>(src->offset()))) {
+ ori(scratch, zero_reg, src->offset() & kImm12Mask);
+ } else {
+ lu12i_w(scratch, src->offset() >> 12 & 0xfffff);
+ if (src->offset() & kImm12Mask) {
+ ori(scratch, scratch, src->offset() & kImm12Mask);
+ }
+ }
+ src->index_ = scratch;
+ src->offset_ = 0;
+}
+
+int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta) {
+ DCHECK(RelocInfo::IsInternalReference(rmode));
+ int64_t* p = reinterpret_cast<int64_t*>(pc);
+ if (*p == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ *p += pc_delta;
+ return 2; // Number of instructions patched.
+}
+
+void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta) {
+ DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
+ Instr instr = instr_at(pc);
+ int32_t offset = instr & kImm26Mask;
+ offset = (((offset & 0x3ff) << 22 >> 6) | ((offset >> 10) & kImm16Mask)) << 2;
+ offset -= pc_delta;
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+ offset >>= 2;
+ offset = ((offset & kImm16Mask) << kRkShift) | ((offset & kImm26Mask) >> 16);
+ *p = (instr & ~kImm26Mask) | offset;
+ return;
+}
+
+void Assembler::FixOnHeapReferences(bool update_embedded_objects) {
+ if (!update_embedded_objects) return;
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Address address = reinterpret_cast<Address>(buffer_->start() + p.first);
+ Handle<HeapObject> object(reinterpret_cast<Address*>(p.second));
+ set_target_value_at(address, object->ptr());
+ }
+}
+
+void Assembler::FixOnHeapReferencesToHandles() {
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Address address = reinterpret_cast<Address>(buffer_->start() + p.first);
+ set_target_value_at(address, p.second);
+ }
+ saved_handles_for_raw_object_ptr_.clear();
+}
+
+void Assembler::GrowBuffer() {
+ bool previously_on_heap = buffer_->IsOnHeap();
+ int previous_on_heap_gc_count = OnHeapGCCount();
+
+ // Compute new buffer size.
+ int old_size = buffer_->size();
+ int new_size = std::min(2 * old_size, old_size + 1 * MB);
+
+ // Some internal data structures overflow for very large buffers; this is
+ // why kMaximalBufferSize must not be too large.
+ if (new_size > kMaximalBufferSize) {
+ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
+ }
+
+ // Set up new buffer.
+ std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
+ DCHECK_EQ(new_size, new_buffer->size());
+ byte* new_start = new_buffer->start();
+
+ // Copy the data.
+ intptr_t pc_delta = new_start - buffer_start_;
+ intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
+ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
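+  // Code is copied relative to the new buffer start (pc_delta), while the
+  // relocation info, which grows backwards from the buffer end, is moved
+  // relative to the new buffer end (rc_delta).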
+ MemMove(new_start, buffer_start_, pc_offset());
+ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ reloc_size);
+
+ // Switch buffers.
+ buffer_ = std::move(new_buffer);
+ buffer_start_ = new_start;
+ pc_ += pc_delta;
+ last_call_pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // None of our relocation types are pc relative pointing outside the code
+ // buffer nor pc absolute pointing inside the code buffer, so there is no need
+ // to relocate any emitted relocation entries.
+
+ // Relocate internal references.
+ for (auto pos : internal_reference_positions_) {
+ Address address = reinterpret_cast<intptr_t>(buffer_start_) + pos;
+ intptr_t internal_ref = ReadUnalignedValue<intptr_t>(address);
+ if (internal_ref != kEndOfJumpChain) {
+ internal_ref += pc_delta;
+ WriteUnalignedValue<intptr_t>(address, internal_ref);
+ }
+ }
+
+ // Fix on-heap references.
+ if (previously_on_heap) {
+ if (buffer_->IsOnHeap()) {
+ FixOnHeapReferences(previous_on_heap_gc_count != OnHeapGCCount());
+ } else {
+ FixOnHeapReferencesToHandles();
+ }
+ }
+}
+
+void Assembler::db(uint8_t data) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
+}
+
+void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ if (!RelocInfo::IsNone(rmode)) {
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
+ RecordRelocInfo(rmode);
+ }
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
+}
+
+void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ if (!RelocInfo::IsNone(rmode)) {
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
+ RecordRelocInfo(rmode);
+ }
+ *reinterpret_cast<uint64_t*>(pc_) = data;
+ pc_ += sizeof(uint64_t);
+}
+
+void Assembler::dd(Label* label) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ uint64_t data;
+ if (label->is_bound()) {
+ data = reinterpret_cast<uint64_t>(buffer_start_ + label->pos());
+ } else {
+ data = jump_address(label);
+ unbound_labels_count_++;
+ internal_reference_positions_.insert(label->pos());
+ }
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ EmitHelper(data);
+}
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ if (!ShouldRecordRelocInfo(rmode)) return;
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+ DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
+ reloc_info_writer.Write(&rinfo);
+}
+
+void Assembler::BlockTrampolinePoolFor(int instructions) {
+ CheckTrampolinePoolQuick(instructions);
+ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+void Assembler::CheckTrampolinePool() {
+ // Some small sequences of instructions must not be broken up by the
+ // insertion of a trampoline pool; such sequences are protected by setting
+ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
+ // which are both checked here. Also, recursive calls to CheckTrampolinePool
+ // are blocked by trampoline_pool_blocked_nesting_.
+ if ((trampoline_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_trampoline_pool_before_)) {
+ // Emission is currently blocked; make sure we try again as soon as
+ // possible.
+ if (trampoline_pool_blocked_nesting_ > 0) {
+ next_buffer_check_ = pc_offset() + kInstrSize;
+ } else {
+ next_buffer_check_ = no_trampoline_pool_before_;
+ }
+ return;
+ }
+
+ DCHECK(!trampoline_emitted_);
+ DCHECK_GE(unbound_labels_count_, 0);
+ if (unbound_labels_count_ > 0) {
+    // First we emit a jump (2 instructions), then we emit the trampoline
+    // pool.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label after_pool;
+ b(&after_pool);
+ nop(); // TODO(LOONG_dev): remove this
+
+ int pool_start = pc_offset();
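+      // Emit one trampoline slot (b + nop) per branch to an unbound label.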
+ for (int i = 0; i < unbound_labels_count_; i++) {
+ {
+ b(&after_pool);
+ nop(); // TODO(LOONG_dev): remove this
+ }
+ }
+ nop();
+ trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+ bind(&after_pool);
+
+ trampoline_emitted_ = true;
+      // As we are only going to emit the trampoline once, we need to prevent
+      // any further emission.
+ next_buffer_check_ = kMaxInt;
+ }
+ } else {
+    // The number of branches to unbound labels at this point is zero, so we
+    // can move the next buffer check to the maximum.
+ next_buffer_check_ =
+ pc_offset() + kMax16BranchOffset - kTrampolineSlotsSize * 16;
+ }
+ return;
+}
+
+Address Assembler::target_address_at(Address pc) {
+ Instr instr0 = instr_at(pc);
+ if (IsB(instr0)) {
+ int32_t offset = instr0 & kImm26Mask;
+ offset = (((offset & 0x3ff) << 22 >> 6) | ((offset >> 10) & kImm16Mask))
+ << 2;
+ return pc + offset;
+ }
+ Instr instr1 = instr_at(pc + 1 * kInstrSize);
+ Instr instr2 = instr_at(pc + 2 * kInstrSize);
+
+  // Interpret the 3 instructions generated by li for the address: see the
+  // listing in Assembler::set_target_value_at() just below.
+ DCHECK((IsLu12i_w(instr0) && (IsOri(instr1)) && (IsLu32i_d(instr2))));
+
+ // Assemble the 48 bit value.
+ uint64_t hi20 = ((uint64_t)(instr2 >> 5) & 0xfffff) << 32;
+ uint64_t mid20 = ((uint64_t)(instr0 >> 5) & 0xfffff) << 12;
+ uint64_t low12 = ((uint64_t)(instr1 >> 10) & 0xfff);
+ int64_t addr = static_cast<int64_t>(hi20 | mid20 | low12);
+
+ // Sign extend to get canonical address.
+ addr = (addr << 16) >> 16;
+ return static_cast<Address>(addr);
+}
+
+// On loong64, a target address is stored in a 3-instruction sequence:
+// 0: lu12i_w(rd, (j.imm64_ >> 12) & kImm20Mask);
+// 1: ori(rd, rd, j.imm64_ & kImm12Mask);
+// 2: lu32i_d(rd, (j.imm64_ >> 32) & kImm20Mask);
+//
+// Patching the address must replace all three instructions (lu12i_w, ori and
+// lu32i_d) and flush the i-cache.
+//
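+// For example (illustrative only), patching in the 48-bit target
+// 0x0000'1234'5678'9abc would produce:
+//    lu12i_w(rd, 0x56789);  // bits [31:12]
+//    ori(rd, rd, 0xabc);    // bits [11:0]
+//    lu32i_d(rd, 0x01234);  // bits [47:32]
+//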
+void Assembler::set_target_value_at(Address pc, uint64_t target,
+ ICacheFlushMode icache_flush_mode) {
+  // There is an optimization where only 3 instructions are used to load an
+  // address in code on LOONG64, because only 48 bits of the address are
+  // effectively used. It relies on the fact that the upper bits [63:48] are
+  // not used for virtual address translation and have to be set according to
+  // the value of bit 47 in order to get a canonical address.
+#ifdef DEBUG
+ // Check we have the result from a li macro-instruction.
+ Instr instr0 = instr_at(pc);
+ Instr instr1 = instr_at(pc + kInstrSize);
+ Instr instr2 = instr_at(pc + kInstrSize * 2);
+  DCHECK((IsLu12i_w(instr0) && IsOri(instr1) && IsLu32i_d(instr2)) ||
+         IsB(instr0));
+#endif
+
+ Instr instr = instr_at(pc);
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+ if (IsB(instr)) {
+ int32_t offset = (target - pc) >> 2;
+ CHECK(is_int26(offset));
+ offset =
+ ((offset & kImm16Mask) << kRkShift) | ((offset & kImm26Mask) >> 16);
+ *p = (instr & ~kImm26Mask) | offset;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc, kInstrSize);
+ }
+ return;
+ }
+ uint32_t rd_code = GetRd(instr);
+
+  // Must use 3 instructions to ensure patchable code.
+ // lu12i_w rd, middle-20.
+ // ori rd, rd, low-12.
+ // lu32i_d rd, high-20.
+ *p = LU12I_W | (((target >> 12) & 0xfffff) << kRjShift) | rd_code;
+ *(p + 1) =
+ ORI | (target & 0xfff) << kRkShift | (rd_code << kRjShift) | rd_code;
+ *(p + 2) = LU32I_D | (((target >> 32) & 0xfffff) << kRjShift) | rd_code;
+
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc, 3 * kInstrSize);
+ }
+}
+
+UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
+ : available_(assembler->GetScratchRegisterList()),
+ old_available_(*available_) {}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ *available_ = old_available_;
+}
+
+Register UseScratchRegisterScope::Acquire() {
+ DCHECK_NOT_NULL(available_);
+ DCHECK_NE(*available_, 0);
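+  // Pick the lowest-numbered available register and mark it as in use for the
+  // lifetime of this scope (the destructor restores the previous set).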
+ int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
+ *available_ &= ~(1UL << index);
+
+ return Register::from_code(index);
+}
+
+bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.h b/deps/v8/src/codegen/loong64/assembler-loong64.h
new file mode 100644
index 0000000000..b886b2ef43
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/assembler-loong64.h
@@ -0,0 +1,1129 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_
+#define V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_
+
+#include <stdio.h>
+
+#include <memory>
+#include <set>
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/external-reference.h"
+#include "src/codegen/label.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/loong64/register-loong64.h"
+#include "src/codegen/machine-type.h"
+#include "src/objects/contexts.h"
+#include "src/objects/smi.h"
+
+namespace v8 {
+namespace internal {
+
+class SafepointTableBuilder;
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands.
+constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
+constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
+// Class Operand represents a shifter operand in data processing instructions.
+class Operand {
+ public:
+ // Immediate.
+ V8_INLINE explicit Operand(int64_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : rm_(no_reg), rmode_(rmode) {
+ value_.immediate = immediate;
+ }
+ V8_INLINE explicit Operand(const ExternalReference& f)
+ : rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) {
+ value_.immediate = static_cast<int64_t>(f.address());
+ }
+ V8_INLINE explicit Operand(const char* s);
+ explicit Operand(Handle<HeapObject> handle);
+ V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ value_.immediate = static_cast<intptr_t>(value.ptr());
+ }
+
+ static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
+ static Operand EmbeddedStringConstant(const StringConstantBase* str);
+
+ // Register.
+ V8_INLINE explicit Operand(Register rm) : rm_(rm) {}
+
+ // Return true if this is a register operand.
+ V8_INLINE bool is_reg() const;
+
+ inline int64_t immediate() const;
+
+ bool IsImmediate() const { return !rm_.is_valid(); }
+
+ HeapObjectRequest heap_object_request() const {
+ DCHECK(IsHeapObjectRequest());
+ return value_.heap_object_request;
+ }
+
+ bool IsHeapObjectRequest() const {
+ DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
+ DCHECK_IMPLIES(is_heap_object_request_,
+ rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::CODE_TARGET);
+ return is_heap_object_request_;
+ }
+
+ Register rm() const { return rm_; }
+
+ RelocInfo::Mode rmode() const { return rmode_; }
+
+ private:
+ Register rm_;
+ union Value {
+ Value() {}
+ HeapObjectRequest heap_object_request; // if is_heap_object_request_
+ int64_t immediate; // otherwise
+ } value_; // valid if rm_ == no_reg
+ bool is_heap_object_request_ = false;
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+ friend class MacroAssembler;
+};
+
+// Class MemOperand represents a memory operand in load and store instructions.
+// 1: base_reg + off_imm( si12 | si14<<2)
+// 2: base_reg + offset_reg
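+// For example (illustrative only): MemOperand(a0, 8) selects the immediate
+// form, while MemOperand(a0, a1) selects the register-indexed form.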
+class V8_EXPORT_PRIVATE MemOperand {
+ public:
+ explicit MemOperand(Register rj, int32_t offset = 0);
+ explicit MemOperand(Register rj, Register offset = no_reg);
+ Register base() const { return base_; }
+ Register index() const { return index_; }
+ int32_t offset() const { return offset_; }
+
+ bool hasIndexReg() const { return index_ != no_reg; }
+
+ private:
+ Register base_; // base
+ Register index_; // index
+ int32_t offset_; // offset
+
+ friend class Assembler;
+};
+
+class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is nullptr, the assembler allocates and grows its
+ // own buffer. Otherwise it takes ownership of the provided buffer.
+ explicit Assembler(const AssemblerOptions&,
+ std::unique_ptr<AssemblerBuffer> = {});
+
+ virtual ~Assembler() {}
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
+ static constexpr int kNoHandlerTable = 0;
+ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
+ void GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset);
+
+ // Convenience wrapper for code without safepoint or handler tables.
+ void GetCode(Isolate* isolate, CodeDesc* desc) {
+ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
+ }
+
+ // This function is called when on-heap-compilation invariants are
+ // invalidated. For instance, when the assembler buffer grows or a GC happens
+ // between Code object allocation and Code object finalization.
+ void FixOnHeapReferences(bool update_embedded_objects = true);
+
+ // This function is called when we fallback from on-heap to off-heap
+ // compilation and patch on-heap references to handles.
+ void FixOnHeapReferencesToHandles();
+
+ // Unused on this architecture.
+ void MaybeEmitOutOfLineConstantPool() {}
+
+  // Loong64 uses BlockTrampolinePool to prevent generating a trampoline inside
+  // a continuous instruction block. The destructor of BlockTrampolinePool must
+  // check whether the trampoline needs to be generated immediately; if it did
+  // not, the branch range could go beyond the maximum branch offset, which
+  // means the pc_offset after calling CheckTrampolinePool may not be the Call
+  // instruction's location. So we use last_call_pc here for the safepoint
+  // record.
+ int pc_offset_for_safepoint() {
+ return static_cast<int>(last_call_pc_ - buffer_start_);
+ }
+
+ // TODO(LOONG_dev): LOONG64 Check this comment
+ // Label operations & relative jumps (PPUM Appendix D).
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+ void bind(Label* L); // Binds an unbound label L to current code position.
+
+ enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };
+
+  // Determines if the Label is bound and near enough that a branch instruction
+  // can be used to reach it, instead of a jump instruction.
+  // c means conditional branch, a means always (unconditional) branch.
+ bool is_near_c(Label* L);
+ bool is_near(Label* L, OffsetSize bits);
+ bool is_near_a(Label* L);
+
+ int BranchOffset(Instr instr);
+
+  // Returns the branch offset to the given label from the current code
+  // position. Links the label to the current position if it is still unbound.
+ int32_t branch_offset_helper(Label* L, OffsetSize bits);
+ inline int32_t branch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset16);
+ }
+ inline int32_t branch_offset21(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset21);
+ }
+ inline int32_t branch_offset26(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset26);
+ }
+ inline int32_t shifted_branch_offset(Label* L) {
+ return branch_offset(L) >> 2;
+ }
+ inline int32_t shifted_branch_offset21(Label* L) {
+ return branch_offset21(L) >> 2;
+ }
+ inline int32_t shifted_branch_offset26(Label* L) {
+ return branch_offset26(L) >> 2;
+ }
+ uint64_t jump_address(Label* L);
+ uint64_t jump_offset(Label* L);
+ uint64_t branch_long_offset(Label* L);
+
+  // Puts a label's target address at the given position.
+ // The high 8 bits are set to zero.
+ void label_at_put(Label* L, int at_offset);
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ // The isolate argument is unused (and may be nullptr) when skipping flushing.
+ static Address target_address_at(Address pc);
+ V8_INLINE static void set_target_address_at(
+ Address pc, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
+ set_target_value_at(pc, target, icache_flush_mode);
+ }
+ // On LOONG64 there is no Constant Pool so we skip that parameter.
+ V8_INLINE static Address target_address_at(Address pc,
+ Address constant_pool) {
+ return target_address_at(pc);
+ }
+ V8_INLINE static void set_target_address_at(
+ Address pc, Address constant_pool, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
+ set_target_address_at(pc, target, icache_flush_mode);
+ }
+
+ static void set_target_value_at(
+ Address pc, uint64_t target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
+ static void JumpLabelToJumpRegister(Address pc);
+
+ // This sets the branch destination (which gets loaded at the call address).
+ // This is for calls and branches within generated code. The serializer
+ // has already deserialized the lui/ori instructions etc.
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Code code, Address target);
+
+ // Get the size of the special target encoded at 'instruction_payload'.
+ inline static int deserialization_special_target_size(
+ Address instruction_payload);
+
+ // This sets the internal reference at the pc.
+ inline static void deserialization_set_target_internal_reference_at(
+ Address pc, Address target,
+ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
+  // Here we are patching the address in the lu12i_w/ori/lu32i_d instruction
+  // sequence. These values are used in the serialization process and must be
+  // zero for the LOONG platform, as Code, Embedded Object or External-reference
+  // pointers are split across consecutive instructions and don't exist
+  // separately in the code, so the serializer should not step forwards in
+  // memory after a target is resolved and written.
+ static constexpr int kSpecialTargetSize = 0;
+
+  // Number of consecutive instructions used to store a 32bit/64bit constant.
+  // This constant is used in the RelocInfo::target_address_address() function
+  // to tell the serializer the address of the instruction that follows the
+  // lu12i_w/ori/lu32i_d sequence.
+ // TODO(LOONG_dev): check this
+ static constexpr int kInstructionsFor64BitConstant = 4;
+
+ // Max offset for instructions with 16-bit offset field
+ static constexpr int kMax16BranchOffset = (1 << (18 - 1)) - 1;
+
+ // Max offset for instructions with 21-bit offset field
+ static constexpr int kMax21BranchOffset = (1 << (23 - 1)) - 1;
+
+ // Max offset for compact branch instructions with 26-bit offset field
+ static constexpr int kMax26BranchOffset = (1 << (28 - 1)) - 1;
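+  // (The offset fields encode word offsets, so the reachable byte range is two
+  // bits wider than the field width.)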
+
+ static constexpr int kTrampolineSlotsSize = 2 * kInstrSize;
+
+ RegList* GetScratchRegisterList() { return &scratch_register_list_; }
+
+ // ---------------------------------------------------------------------------
+ // Code generation.
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+  // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
+ // Aligns code to something that's optimal for a jump target for the platform.
+ void CodeTargetAlign();
+ void LoopHeaderAlign() { CodeTargetAlign(); }
+
+ // Different nop operations are used by the code generator to detect certain
+ // states of the generated code.
+ enum NopMarkerTypes {
+ NON_MARKING_NOP = 0,
+ DEBUG_BREAK_NOP,
+ // IC markers.
+ PROPERTY_ACCESS_INLINED,
+ PROPERTY_ACCESS_INLINED_CONTEXT,
+ PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+ // Helper values.
+ LAST_CODE_MARKER,
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
+ };
+
+  // Type == 0 is the default non-marking nop. For LoongArch this is an
+  // andi(zero_reg, zero_reg, 0).
+ void nop(unsigned int type = 0) {
+ DCHECK_LT(type, 32);
+ andi(zero_reg, zero_reg, type);
+ }
+
+ // --------Branch-and-jump-instructions----------
+ // We don't use likely variant of instructions.
+ void b(int32_t offset);
+ inline void b(Label* L) { b(shifted_branch_offset26(L)); }
+ void bl(int32_t offset);
+ inline void bl(Label* L) { bl(shifted_branch_offset26(L)); }
+
+ void beq(Register rj, Register rd, int32_t offset);
+ inline void beq(Register rj, Register rd, Label* L) {
+ beq(rj, rd, shifted_branch_offset(L));
+ }
+ void bne(Register rj, Register rd, int32_t offset);
+ inline void bne(Register rj, Register rd, Label* L) {
+ bne(rj, rd, shifted_branch_offset(L));
+ }
+ void blt(Register rj, Register rd, int32_t offset);
+ inline void blt(Register rj, Register rd, Label* L) {
+ blt(rj, rd, shifted_branch_offset(L));
+ }
+ void bge(Register rj, Register rd, int32_t offset);
+ inline void bge(Register rj, Register rd, Label* L) {
+ bge(rj, rd, shifted_branch_offset(L));
+ }
+ void bltu(Register rj, Register rd, int32_t offset);
+ inline void bltu(Register rj, Register rd, Label* L) {
+ bltu(rj, rd, shifted_branch_offset(L));
+ }
+ void bgeu(Register rj, Register rd, int32_t offset);
+ inline void bgeu(Register rj, Register rd, Label* L) {
+ bgeu(rj, rd, shifted_branch_offset(L));
+ }
+ void beqz(Register rj, int32_t offset);
+ inline void beqz(Register rj, Label* L) {
+ beqz(rj, shifted_branch_offset21(L));
+ }
+ void bnez(Register rj, int32_t offset);
+ inline void bnez(Register rj, Label* L) {
+ bnez(rj, shifted_branch_offset21(L));
+ }
+
+ void jirl(Register rd, Register rj, int32_t offset);
+
+ void bceqz(CFRegister cj, int32_t si21);
+ inline void bceqz(CFRegister cj, Label* L) {
+ bceqz(cj, shifted_branch_offset21(L));
+ }
+ void bcnez(CFRegister cj, int32_t si21);
+ inline void bcnez(CFRegister cj, Label* L) {
+ bcnez(cj, shifted_branch_offset21(L));
+ }
+
+ // -------Data-processing-instructions---------
+
+ // Arithmetic.
+ void add_w(Register rd, Register rj, Register rk);
+ void add_d(Register rd, Register rj, Register rk);
+ void sub_w(Register rd, Register rj, Register rk);
+ void sub_d(Register rd, Register rj, Register rk);
+
+ void addi_w(Register rd, Register rj, int32_t si12);
+ void addi_d(Register rd, Register rj, int32_t si12);
+
+ void addu16i_d(Register rd, Register rj, int32_t si16);
+
+ void alsl_w(Register rd, Register rj, Register rk, int32_t sa2);
+ void alsl_wu(Register rd, Register rj, Register rk, int32_t sa2);
+ void alsl_d(Register rd, Register rj, Register rk, int32_t sa2);
+
+ void lu12i_w(Register rd, int32_t si20);
+ void lu32i_d(Register rd, int32_t si20);
+ void lu52i_d(Register rd, Register rj, int32_t si12);
+
+ void slt(Register rd, Register rj, Register rk);
+ void sltu(Register rd, Register rj, Register rk);
+ void slti(Register rd, Register rj, int32_t si12);
+ void sltui(Register rd, Register rj, int32_t si12);
+
+ void pcaddi(Register rd, int32_t si20);
+ void pcaddu12i(Register rd, int32_t si20);
+ void pcaddu18i(Register rd, int32_t si20);
+ void pcalau12i(Register rd, int32_t si20);
+
+ void and_(Register rd, Register rj, Register rk);
+ void or_(Register rd, Register rj, Register rk);
+ void xor_(Register rd, Register rj, Register rk);
+ void nor(Register rd, Register rj, Register rk);
+ void andn(Register rd, Register rj, Register rk);
+ void orn(Register rd, Register rj, Register rk);
+
+ void andi(Register rd, Register rj, int32_t ui12);
+ void ori(Register rd, Register rj, int32_t ui12);
+ void xori(Register rd, Register rj, int32_t ui12);
+
+ void mul_w(Register rd, Register rj, Register rk);
+ void mulh_w(Register rd, Register rj, Register rk);
+ void mulh_wu(Register rd, Register rj, Register rk);
+ void mul_d(Register rd, Register rj, Register rk);
+ void mulh_d(Register rd, Register rj, Register rk);
+ void mulh_du(Register rd, Register rj, Register rk);
+
+ void mulw_d_w(Register rd, Register rj, Register rk);
+ void mulw_d_wu(Register rd, Register rj, Register rk);
+
+ void div_w(Register rd, Register rj, Register rk);
+ void mod_w(Register rd, Register rj, Register rk);
+ void div_wu(Register rd, Register rj, Register rk);
+ void mod_wu(Register rd, Register rj, Register rk);
+ void div_d(Register rd, Register rj, Register rk);
+ void mod_d(Register rd, Register rj, Register rk);
+ void div_du(Register rd, Register rj, Register rk);
+ void mod_du(Register rd, Register rj, Register rk);
+
+ // Shifts.
+ void sll_w(Register rd, Register rj, Register rk);
+ void srl_w(Register rd, Register rj, Register rk);
+ void sra_w(Register rd, Register rj, Register rk);
+ void rotr_w(Register rd, Register rj, Register rk);
+
+ void slli_w(Register rd, Register rj, int32_t ui5);
+ void srli_w(Register rd, Register rj, int32_t ui5);
+ void srai_w(Register rd, Register rj, int32_t ui5);
+ void rotri_w(Register rd, Register rj, int32_t ui5);
+
+ void sll_d(Register rd, Register rj, Register rk);
+ void srl_d(Register rd, Register rj, Register rk);
+ void sra_d(Register rd, Register rj, Register rk);
+ void rotr_d(Register rd, Register rj, Register rk);
+
+ void slli_d(Register rd, Register rj, int32_t ui6);
+ void srli_d(Register rd, Register rj, int32_t ui6);
+ void srai_d(Register rd, Register rj, int32_t ui6);
+ void rotri_d(Register rd, Register rj, int32_t ui6);
+
+ // Bit twiddling.
+ void ext_w_b(Register rd, Register rj);
+ void ext_w_h(Register rd, Register rj);
+
+ void clo_w(Register rd, Register rj);
+ void clz_w(Register rd, Register rj);
+ void cto_w(Register rd, Register rj);
+ void ctz_w(Register rd, Register rj);
+ void clo_d(Register rd, Register rj);
+ void clz_d(Register rd, Register rj);
+ void cto_d(Register rd, Register rj);
+ void ctz_d(Register rd, Register rj);
+
+ void bytepick_w(Register rd, Register rj, Register rk, int32_t sa2);
+ void bytepick_d(Register rd, Register rj, Register rk, int32_t sa3);
+
+ void revb_2h(Register rd, Register rj);
+ void revb_4h(Register rd, Register rj);
+ void revb_2w(Register rd, Register rj);
+ void revb_d(Register rd, Register rj);
+
+ void revh_2w(Register rd, Register rj);
+ void revh_d(Register rd, Register rj);
+
+ void bitrev_4b(Register rd, Register rj);
+ void bitrev_8b(Register rd, Register rj);
+
+ void bitrev_w(Register rd, Register rj);
+ void bitrev_d(Register rd, Register rj);
+
+ void bstrins_w(Register rd, Register rj, int32_t msbw, int32_t lsbw);
+ void bstrins_d(Register rd, Register rj, int32_t msbd, int32_t lsbd);
+
+ void bstrpick_w(Register rd, Register rj, int32_t msbw, int32_t lsbw);
+ void bstrpick_d(Register rd, Register rj, int32_t msbd, int32_t lsbd);
+
+ void maskeqz(Register rd, Register rj, Register rk);
+ void masknez(Register rd, Register rj, Register rk);
+
+ // Memory-instructions
+ void ld_b(Register rd, Register rj, int32_t si12);
+ void ld_h(Register rd, Register rj, int32_t si12);
+ void ld_w(Register rd, Register rj, int32_t si12);
+ void ld_d(Register rd, Register rj, int32_t si12);
+ void ld_bu(Register rd, Register rj, int32_t si12);
+ void ld_hu(Register rd, Register rj, int32_t si12);
+ void ld_wu(Register rd, Register rj, int32_t si12);
+ void st_b(Register rd, Register rj, int32_t si12);
+ void st_h(Register rd, Register rj, int32_t si12);
+ void st_w(Register rd, Register rj, int32_t si12);
+ void st_d(Register rd, Register rj, int32_t si12);
+
+ void ldx_b(Register rd, Register rj, Register rk);
+ void ldx_h(Register rd, Register rj, Register rk);
+ void ldx_w(Register rd, Register rj, Register rk);
+ void ldx_d(Register rd, Register rj, Register rk);
+ void ldx_bu(Register rd, Register rj, Register rk);
+ void ldx_hu(Register rd, Register rj, Register rk);
+ void ldx_wu(Register rd, Register rj, Register rk);
+ void stx_b(Register rd, Register rj, Register rk);
+ void stx_h(Register rd, Register rj, Register rk);
+ void stx_w(Register rd, Register rj, Register rk);
+ void stx_d(Register rd, Register rj, Register rk);
+
+ void ldptr_w(Register rd, Register rj, int32_t si14);
+ void ldptr_d(Register rd, Register rj, int32_t si14);
+ void stptr_w(Register rd, Register rj, int32_t si14);
+ void stptr_d(Register rd, Register rj, int32_t si14);
+
+ void amswap_w(Register rd, Register rk, Register rj);
+ void amswap_d(Register rd, Register rk, Register rj);
+ void amadd_w(Register rd, Register rk, Register rj);
+ void amadd_d(Register rd, Register rk, Register rj);
+ void amand_w(Register rd, Register rk, Register rj);
+ void amand_d(Register rd, Register rk, Register rj);
+ void amor_w(Register rd, Register rk, Register rj);
+ void amor_d(Register rd, Register rk, Register rj);
+ void amxor_w(Register rd, Register rk, Register rj);
+ void amxor_d(Register rd, Register rk, Register rj);
+ void ammax_w(Register rd, Register rk, Register rj);
+ void ammax_d(Register rd, Register rk, Register rj);
+ void ammin_w(Register rd, Register rk, Register rj);
+ void ammin_d(Register rd, Register rk, Register rj);
+ void ammax_wu(Register rd, Register rk, Register rj);
+ void ammax_du(Register rd, Register rk, Register rj);
+ void ammin_wu(Register rd, Register rk, Register rj);
+ void ammin_du(Register rd, Register rk, Register rj);
+
+ void amswap_db_w(Register rd, Register rk, Register rj);
+ void amswap_db_d(Register rd, Register rk, Register rj);
+ void amadd_db_w(Register rd, Register rk, Register rj);
+ void amadd_db_d(Register rd, Register rk, Register rj);
+ void amand_db_w(Register rd, Register rk, Register rj);
+ void amand_db_d(Register rd, Register rk, Register rj);
+ void amor_db_w(Register rd, Register rk, Register rj);
+ void amor_db_d(Register rd, Register rk, Register rj);
+ void amxor_db_w(Register rd, Register rk, Register rj);
+ void amxor_db_d(Register rd, Register rk, Register rj);
+ void ammax_db_w(Register rd, Register rk, Register rj);
+ void ammax_db_d(Register rd, Register rk, Register rj);
+ void ammin_db_w(Register rd, Register rk, Register rj);
+ void ammin_db_d(Register rd, Register rk, Register rj);
+ void ammax_db_wu(Register rd, Register rk, Register rj);
+ void ammax_db_du(Register rd, Register rk, Register rj);
+ void ammin_db_wu(Register rd, Register rk, Register rj);
+ void ammin_db_du(Register rd, Register rk, Register rj);
+
+ void ll_w(Register rd, Register rj, int32_t si14);
+ void ll_d(Register rd, Register rj, int32_t si14);
+ void sc_w(Register rd, Register rj, int32_t si14);
+ void sc_d(Register rd, Register rj, int32_t si14);
+
+ void dbar(int32_t hint);
+ void ibar(int32_t hint);
+
+ // Break instruction
+ void break_(uint32_t code, bool break_as_stop = false);
+ void stop(uint32_t code = kMaxStopCode);
+
+ // Arithmetic.
+ void fadd_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fadd_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fsub_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fsub_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmul_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmul_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fdiv_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fdiv_d(FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fnmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fnmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fnmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fnmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+
+ void fmax_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmax_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmin_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmin_d(FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void fmaxa_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmaxa_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmina_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmina_d(FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void fabs_s(FPURegister fd, FPURegister fj);
+ void fabs_d(FPURegister fd, FPURegister fj);
+ void fneg_s(FPURegister fd, FPURegister fj);
+ void fneg_d(FPURegister fd, FPURegister fj);
+
+ void fsqrt_s(FPURegister fd, FPURegister fj);
+ void fsqrt_d(FPURegister fd, FPURegister fj);
+ void frecip_s(FPURegister fd, FPURegister fj);
+ void frecip_d(FPURegister fd, FPURegister fj);
+ void frsqrt_s(FPURegister fd, FPURegister fj);
+ void frsqrt_d(FPURegister fd, FPURegister fj);
+
+ void fscaleb_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fscaleb_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void flogb_s(FPURegister fd, FPURegister fj);
+ void flogb_d(FPURegister fd, FPURegister fj);
+ void fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void fclass_s(FPURegister fd, FPURegister fj);
+ void fclass_d(FPURegister fd, FPURegister fj);
+
+ void fcmp_cond_s(FPUCondition cc, FPURegister fj, FPURegister fk,
+ CFRegister cd);
+ void fcmp_cond_d(FPUCondition cc, FPURegister fj, FPURegister fk,
+ CFRegister cd);
+
+ void fcvt_s_d(FPURegister fd, FPURegister fj);
+ void fcvt_d_s(FPURegister fd, FPURegister fj);
+
+ void ffint_s_w(FPURegister fd, FPURegister fj);
+ void ffint_s_l(FPURegister fd, FPURegister fj);
+ void ffint_d_w(FPURegister fd, FPURegister fj);
+ void ffint_d_l(FPURegister fd, FPURegister fj);
+ void ftint_w_s(FPURegister fd, FPURegister fj);
+ void ftint_w_d(FPURegister fd, FPURegister fj);
+ void ftint_l_s(FPURegister fd, FPURegister fj);
+ void ftint_l_d(FPURegister fd, FPURegister fj);
+
+ void ftintrm_w_s(FPURegister fd, FPURegister fj);
+ void ftintrm_w_d(FPURegister fd, FPURegister fj);
+ void ftintrm_l_s(FPURegister fd, FPURegister fj);
+ void ftintrm_l_d(FPURegister fd, FPURegister fj);
+ void ftintrp_w_s(FPURegister fd, FPURegister fj);
+ void ftintrp_w_d(FPURegister fd, FPURegister fj);
+ void ftintrp_l_s(FPURegister fd, FPURegister fj);
+ void ftintrp_l_d(FPURegister fd, FPURegister fj);
+ void ftintrz_w_s(FPURegister fd, FPURegister fj);
+ void ftintrz_w_d(FPURegister fd, FPURegister fj);
+ void ftintrz_l_s(FPURegister fd, FPURegister fj);
+ void ftintrz_l_d(FPURegister fd, FPURegister fj);
+ void ftintrne_w_s(FPURegister fd, FPURegister fj);
+ void ftintrne_w_d(FPURegister fd, FPURegister fj);
+ void ftintrne_l_s(FPURegister fd, FPURegister fj);
+ void ftintrne_l_d(FPURegister fd, FPURegister fj);
+
+ void frint_s(FPURegister fd, FPURegister fj);
+ void frint_d(FPURegister fd, FPURegister fj);
+
+ void fmov_s(FPURegister fd, FPURegister fj);
+ void fmov_d(FPURegister fd, FPURegister fj);
+
+ void fsel(CFRegister ca, FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void movgr2fr_w(FPURegister fd, Register rj);
+ void movgr2fr_d(FPURegister fd, Register rj);
+ void movgr2frh_w(FPURegister fd, Register rj);
+
+ void movfr2gr_s(Register rd, FPURegister fj);
+ void movfr2gr_d(Register rd, FPURegister fj);
+ void movfrh2gr_s(Register rd, FPURegister fj);
+
+ void movgr2fcsr(Register rj, FPUControlRegister fcsr = FCSR0);
+ void movfcsr2gr(Register rd, FPUControlRegister fcsr = FCSR0);
+
+ void movfr2cf(CFRegister cd, FPURegister fj);
+ void movcf2fr(FPURegister fd, CFRegister cj);
+
+ void movgr2cf(CFRegister cd, Register rj);
+ void movcf2gr(Register rd, CFRegister cj);
+
+ void fld_s(FPURegister fd, Register rj, int32_t si12);
+ void fld_d(FPURegister fd, Register rj, int32_t si12);
+ void fst_s(FPURegister fd, Register rj, int32_t si12);
+ void fst_d(FPURegister fd, Register rj, int32_t si12);
+
+ void fldx_s(FPURegister fd, Register rj, Register rk);
+ void fldx_d(FPURegister fd, Register rj, Register rk);
+ void fstx_s(FPURegister fd, Register rj, Register rk);
+ void fstx_d(FPURegister fd, Register rj, Register rk);
+
+ // Check the code size generated from label to here.
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
+
+ // Check the number of instructions generated from label to here.
+ int InstructionsGeneratedSince(Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstrSize;
+ }
+
+ // Class for scoping postponing the trampoline pool generation.
+ class V8_NODISCARD BlockTrampolinePoolScope {
+ public:
+ explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockTrampolinePool();
+ }
+ ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
+ };
+
+ // Class for postponing the assembly buffer growth. Typically used for
+ // sequences of instructions that must be emitted as a unit, before
+ // buffer growth (and relocation) can occur.
+ // This blocking scope is not nestable.
+ class V8_NODISCARD BlockGrowBufferScope {
+ public:
+ explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockGrowBuffer();
+ }
+ ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
+ };
+
+ // Record a deoptimization reason that can be used by a log or cpu profiler.
+ // Use --trace-deopt to enable.
+ void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
+ SourcePosition position, int id);
+
+ static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta);
+ static void RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta);
+
+ // Writes a single byte or word of data in the code stream. Used for
+ // inline tables, e.g., jump-tables.
+ void db(uint8_t data);
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ dq(data, rmode);
+ }
+ void dd(Label* label);
+
+ // Postpone the generation of the trampoline pool for the specified number of
+ // instructions.
+ void BlockTrampolinePoolFor(int instructions);
+
+ // Check if there is less than kGap bytes available in the buffer.
+ // If this is the case, we need to grow the buffer before emitting
+ // an instruction or relocation information.
+ inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+ // Get the number of bytes available in the buffer.
+ inline intptr_t available_space() const {
+ return reloc_info_writer.pos() - pc_;
+ }
+
+ // Read/patch instructions.
+ static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
+ static void instr_at_put(Address pc, Instr instr) {
+ *reinterpret_cast<Instr*>(pc) = instr;
+ }
+ Instr instr_at(int pos) {
+ return *reinterpret_cast<Instr*>(buffer_start_ + pos);
+ }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
+ }
+
+ // Check if an instruction is a branch of some kind.
+ static bool IsBranch(Instr instr);
+ static bool IsB(Instr instr);
+ static bool IsBz(Instr instr);
+ static bool IsNal(Instr instr);
+
+ static bool IsBeq(Instr instr);
+ static bool IsBne(Instr instr);
+
+ static bool IsJump(Instr instr);
+ static bool IsMov(Instr instr, Register rd, Register rs);
+ static bool IsPcAddi(Instr instr, Register rd, int32_t si20);
+
+ static bool IsJ(Instr instr);
+ static bool IsLu12i_w(Instr instr);
+ static bool IsOri(Instr instr);
+ static bool IsLu32i_d(Instr instr);
+ static bool IsLu52i_d(Instr instr);
+
+ static bool IsNop(Instr instr, unsigned int type);
+
+ static Register GetRjReg(Instr instr);
+ static Register GetRkReg(Instr instr);
+ static Register GetRdReg(Instr instr);
+
+ static uint32_t GetRj(Instr instr);
+ static uint32_t GetRjField(Instr instr);
+ static uint32_t GetRk(Instr instr);
+ static uint32_t GetRkField(Instr instr);
+ static uint32_t GetRd(Instr instr);
+ static uint32_t GetRdField(Instr instr);
+ static uint32_t GetSa2(Instr instr);
+ static uint32_t GetSa3(Instr instr);
+ static uint32_t GetSa2Field(Instr instr);
+ static uint32_t GetSa3Field(Instr instr);
+ static uint32_t GetOpcodeField(Instr instr);
+ static uint32_t GetFunction(Instr instr);
+ static uint32_t GetFunctionField(Instr instr);
+ static uint32_t GetImmediate16(Instr instr);
+ static uint32_t GetLabelConst(Instr instr);
+
+ static bool IsAddImmediate(Instr instr);
+ static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
+
+ static bool IsAndImmediate(Instr instr);
+ static bool IsEmittedConstant(Instr instr);
+
+ void CheckTrampolinePool();
+
+ // Get the code target object for a pc-relative call or jump.
+ V8_INLINE Handle<Code> relative_code_target_object_handle_at(
+ Address pc_) const;
+
+ inline int UnboundLabelsCount() { return unbound_labels_count_; }
+
+ protected:
+ // Helper function for memory load/store.
+ void AdjustBaseAndOffset(MemOperand* src);
+
+ inline static void set_target_internal_reference_encoded_at(Address pc,
+ Address target);
+
+ int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Decode branch instruction at pos and return branch target pos.
+ int target_at(int pos, bool is_internal);
+
+ // Patch branch instruction at pos to branch to given branch target pos.
+ void target_at_put(int pos, int target_pos, bool is_internal);
+
+  // Says whether we need to relocate with this mode.
+ bool MustUseReg(RelocInfo::Mode rmode);
+
+ // Record reloc info for current pc_.
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ // Block the emission of the trampoline pool before pc_offset.
+ void BlockTrampolinePoolBefore(int pc_offset) {
+ if (no_trampoline_pool_before_ < pc_offset)
+ no_trampoline_pool_before_ = pc_offset;
+ }
+
+ void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
+
+ void EndBlockTrampolinePool() {
+ trampoline_pool_blocked_nesting_--;
+ if (trampoline_pool_blocked_nesting_ == 0) {
+ CheckTrampolinePoolQuick(1);
+ }
+ }
+
+ bool is_trampoline_pool_blocked() const {
+ return trampoline_pool_blocked_nesting_ > 0;
+ }
+
+ bool has_exception() const { return internal_trampoline_exception_; }
+
+ bool is_trampoline_emitted() const { return trampoline_emitted_; }
+
+ // Temporarily block automatic assembly buffer growth.
+ void StartBlockGrowBuffer() {
+ DCHECK(!block_buffer_growth_);
+ block_buffer_growth_ = true;
+ }
+
+ void EndBlockGrowBuffer() {
+ DCHECK(block_buffer_growth_);
+ block_buffer_growth_ = false;
+ }
+
+ bool is_buffer_growth_blocked() const { return block_buffer_growth_; }
+
+ void CheckTrampolinePoolQuick(int extra_instructions = 0) {
+ if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
+ CheckTrampolinePool();
+ }
+ }
+
+ void set_last_call_pc_(byte* pc) { last_call_pc_ = pc; }
+
+#ifdef DEBUG
+ bool EmbeddedObjectMatches(int pc_offset, Handle<Object> object) {
+ return target_address_at(
+ reinterpret_cast<Address>(buffer_->start() + pc_offset)) ==
+ (IsOnHeap() ? object->ptr() : object.address());
+ }
+#endif
+
+ private:
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512 * MB;
+
+ // Buffer size and constant pool distance are checked together at regular
+ // intervals of kBufferCheckInterval emitted bytes.
+ static constexpr int kBufferCheckInterval = 1 * KB / 2;
+
+ // Code generation.
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries.
+ static constexpr int kGap = 64;
+ STATIC_ASSERT(AssemblerBase::kMinimalBufferSize >= 2 * kGap);
+
+ // Repeated checking whether the trampoline pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated.
+ static constexpr int kCheckConstIntervalInst = 32;
+ static constexpr int kCheckConstInterval =
+ kCheckConstIntervalInst * kInstrSize;
+
+ int next_buffer_check_; // pc offset of next buffer check.
+
+ // Emission of the trampoline pool may be blocked in some code sequences.
+ int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
+ int no_trampoline_pool_before_; // Block emission before this pc offset.
+
+ // Keep track of the last emitted pool to guarantee a maximal distance.
+ int last_trampoline_pool_end_; // pc offset of the end of the last pool.
+
+ // Automatic growth of the assembly buffer may be blocked for some sequences.
+ bool block_buffer_growth_; // Block growth when true.
+
+ // Relocation information generation.
+ // Each relocation is encoded as a variable size value.
+ static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+
+ // The bound position, before this we cannot do instruction elimination.
+ int last_bound_pos_;
+
+ // Code emission.
+ inline void CheckBuffer();
+ void GrowBuffer();
+ inline void emit(Instr x);
+ inline void emit(uint64_t x);
+ template <typename T>
+ inline void EmitHelper(T x);
+ inline void EmitHelper(Instr x);
+
+ void GenB(Opcode opcode, Register rj, int32_t si21); // opcode:6
+ void GenB(Opcode opcode, CFRegister cj, int32_t si21, bool isEq);
+ void GenB(Opcode opcode, int32_t si26);
+ void GenBJ(Opcode opcode, Register rj, Register rd, int32_t si16);
+ void GenCmp(Opcode opcode, FPUCondition cond, FPURegister fk, FPURegister fj,
+ CFRegister cd);
+ void GenSel(Opcode opcode, CFRegister ca, FPURegister fk, FPURegister fj,
+ FPURegister rd);
+
+ void GenRegister(Opcode opcode, Register rj, Register rd, bool rjrd = true);
+ void GenRegister(Opcode opcode, FPURegister fj, FPURegister fd);
+ void GenRegister(Opcode opcode, Register rj, FPURegister fd);
+ void GenRegister(Opcode opcode, FPURegister fj, Register rd);
+ void GenRegister(Opcode opcode, Register rj, FPUControlRegister fd);
+ void GenRegister(Opcode opcode, FPUControlRegister fj, Register rd);
+ void GenRegister(Opcode opcode, FPURegister fj, CFRegister cd);
+ void GenRegister(Opcode opcode, CFRegister cj, FPURegister fd);
+ void GenRegister(Opcode opcode, Register rj, CFRegister cd);
+ void GenRegister(Opcode opcode, CFRegister cj, Register rd);
+
+ void GenRegister(Opcode opcode, Register rk, Register rj, Register rd);
+ void GenRegister(Opcode opcode, FPURegister fk, FPURegister fj,
+ FPURegister fd);
+
+ void GenRegister(Opcode opcode, FPURegister fa, FPURegister fk,
+ FPURegister fj, FPURegister fd);
+ void GenRegister(Opcode opcode, Register rk, Register rj, FPURegister fd);
+
+ void GenImm(Opcode opcode, int32_t bit3, Register rk, Register rj,
+ Register rd);
+ void GenImm(Opcode opcode, int32_t bit6m, int32_t bit6l, Register rj,
+ Register rd);
+ void GenImm(Opcode opcode, int32_t bit20, Register rd);
+ void GenImm(Opcode opcode, int32_t bit15);
+ void GenImm(Opcode opcode, int32_t value, Register rj, Register rd,
+ int32_t value_bits); // 6 | 12 | 14 | 16
+ void GenImm(Opcode opcode, int32_t bit12, Register rj, FPURegister fd);
+
+ // Labels.
+ void print(const Label* L);
+ void bind_to(Label* L, int pos);
+ void next(Label* L, bool is_internal);
+
+ // One trampoline consists of:
+ // - space for trampoline slots,
+ // - space for labels.
+ //
+ // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
+  // Space for trampoline slots precedes space for labels. Each label occupies
+  // one instruction, so the total space for labels is equal to
+  // label_count * kInstrSize.
+ class Trampoline {
+ public:
+ Trampoline() {
+ start_ = 0;
+ next_slot_ = 0;
+ free_slot_count_ = 0;
+ end_ = 0;
+ }
+ Trampoline(int start, int slot_count) {
+ start_ = start;
+ next_slot_ = start;
+ free_slot_count_ = slot_count;
+ end_ = start + slot_count * kTrampolineSlotsSize;
+ }
+ int start() { return start_; }
+ int end() { return end_; }
+ int take_slot() {
+ int trampoline_slot = kInvalidSlotPos;
+ if (free_slot_count_ <= 0) {
+ // We have run out of space on trampolines.
+ // Make sure we fail in debug mode, so we become aware of each case
+ // when this happens.
+ DCHECK(0);
+ // Internal exception will be caught.
+ } else {
+ trampoline_slot = next_slot_;
+ free_slot_count_--;
+ next_slot_ += kTrampolineSlotsSize;
+ }
+ return trampoline_slot;
+ }
+
+ private:
+ int start_;
+ int end_;
+ int next_slot_;
+ int free_slot_count_;
+ };
+
+ int32_t get_trampoline_entry(int32_t pos);
+ int unbound_labels_count_;
+  // After the trampoline is emitted, long branches are used in generated
+  // code for forward branches whose target offsets could be beyond the reach
+  // of a branch instruction. We use this information to trigger a different
+  // mode of branch instruction generation, where jump instructions are used
+  // rather than regular branch instructions.
+ bool trampoline_emitted_;
+ static constexpr int kInvalidSlotPos = -1;
+
+  // Internal reference positions, required for unbound internal reference
+ // labels.
+ std::set<int64_t> internal_reference_positions_;
+ bool is_internal_reference(Label* L) {
+ return internal_reference_positions_.find(L->pos()) !=
+ internal_reference_positions_.end();
+ }
+
+ void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
+ void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
+ bool prev_instr_compact_branch_ = false;
+
+ Trampoline trampoline_;
+ bool internal_trampoline_exception_;
+
+  // Keep track of the last Call's position to ensure that the safepoint can
+  // get the correct information even if there is a trampoline immediately
+  // after the Call.
+ byte* last_call_pc_;
+
+ RegList scratch_register_list_;
+
+ private:
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
+ int WriteCodeComments();
+
+ friend class RegExpMacroAssemblerLOONG64;
+ friend class RelocInfo;
+ friend class BlockTrampolinePoolScope;
+ friend class EnsureSpace;
+};
+
+class EnsureSpace {
+ public:
+ explicit inline EnsureSpace(Assembler* assembler);
+};
+
+class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
+ public:
+ explicit UseScratchRegisterScope(Assembler* assembler);
+ ~UseScratchRegisterScope();
+
+ Register Acquire();
+ bool hasAvailable() const;
+
+ void Include(const RegList& list) { *available_ |= list; }
+ void Exclude(const RegList& list) { *available_ &= ~list; }
+ void Include(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Include(list);
+ }
+ void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Exclude(list);
+ }
+
+ private:
+ RegList* available_;
+ RegList old_available_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_
diff --git a/deps/v8/src/codegen/loong64/constants-loong64.cc b/deps/v8/src/codegen/loong64/constants-loong64.cc
new file mode 100644
index 0000000000..3f887a50fe
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/constants-loong64.cc
@@ -0,0 +1,100 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/codegen/loong64/constants-loong64.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Registers.
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumSimuRegisters] = {
+ "zero_reg", "ra", "tp", "sp", "a0", "a1", "a2", "a3", "a4", "a5", "a6",
+ "a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "x_reg",
+ "fp", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "pc"};
+
+// List of alias names which can be used when referring to registers.
+const Registers::RegisterAlias Registers::aliases_[] = {
+ {0, "zero"}, {30, "cp"}, {kInvalidRegister, nullptr}};
+
+const char* Registers::Name(int reg) {
+ const char* result;
+ if ((0 <= reg) && (reg < kNumSimuRegisters)) {
+ result = names_[reg];
+ } else {
+ result = "noreg";
+ }
+ return result;
+}
+
+int Registers::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumSimuRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].reg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].reg;
+ }
+ i++;
+ }
+
+  // No register with the requested name found.
+ return kInvalidRegister;
+}
+
+const char* FPURegisters::names_[kNumFPURegisters] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
+ "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
+ "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
+
+// List of alias names which can be used when referring to LoongArch FPU
+// registers.
+const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
+ {kInvalidRegister, nullptr}};
+
+const char* FPURegisters::Name(int creg) {
+ const char* result;
+ if ((0 <= creg) && (creg < kNumFPURegisters)) {
+ result = names_[creg];
+ } else {
+ result = "nocreg";
+ }
+ return result;
+}
+
+int FPURegisters::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].creg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].creg;
+ }
+ i++;
+ }
+
+  // No FPU register with the requested name found.
+ return kInvalidFPURegister;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/codegen/loong64/constants-loong64.h b/deps/v8/src/codegen/loong64/constants-loong64.h
new file mode 100644
index 0000000000..394c5dc6ab
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/constants-loong64.h
@@ -0,0 +1,1291 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_
+#define V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+
+// Get the standard printf format macros for C99 stdint types.
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include <inttypes.h>
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate LOONG64 instructions.
+
+namespace v8 {
+namespace internal {
+
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
+
+// -----------------------------------------------------------------------------
+// Registers and FPURegisters.
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+const int kInvalidRegister = -1;
+
+// Number of registers with pc.
+const int kNumSimuRegisters = 33;
+
+// In the simulator, the PC register is simulated as the 33rd register.
+const int kPCRegister = 32;
+
+// Number of floating point registers.
+const int kNumFPURegisters = 32;
+const int kInvalidFPURegister = -1;
+
+// FPU control registers.
+const int kFCSRRegister = 0;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1u << 31) - 1;
+const int32_t kFPUInvalidResultNegative = static_cast<int32_t>(1u << 31);
+const uint64_t kFPU64InvalidResult =
+ static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
+const int64_t kFPU64InvalidResultNegative =
+ static_cast<int64_t>(static_cast<uint64_t>(1) << 63);
+
+// FCSR constants.
+const uint32_t kFCSRInexactCauseBit = 24;
+const uint32_t kFCSRUnderflowCauseBit = 25;
+const uint32_t kFCSROverflowCauseBit = 26;
+const uint32_t kFCSRDivideByZeroCauseBit = 27;
+const uint32_t kFCSRInvalidOpCauseBit = 28;
+
+const uint32_t kFCSRInexactCauseMask = 1 << kFCSRInexactCauseBit;
+const uint32_t kFCSRUnderflowCauseMask = 1 << kFCSRUnderflowCauseBit;
+const uint32_t kFCSROverflowCauseMask = 1 << kFCSROverflowCauseBit;
+const uint32_t kFCSRDivideByZeroCauseMask = 1 << kFCSRDivideByZeroCauseBit;
+const uint32_t kFCSRInvalidOpCauseMask = 1 << kFCSRInvalidOpCauseBit;
+
+const uint32_t kFCSRCauseMask =
+ kFCSRInexactCauseMask | kFCSRUnderflowCauseMask | kFCSROverflowCauseMask |
+ kFCSRDivideByZeroCauseMask | kFCSRInvalidOpCauseMask;
+
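+// All FCSR cause bits except the Inexact cause bit.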
+const uint32_t kFCSRExceptionCauseMask = kFCSRCauseMask ^ kFCSRInexactCauseMask;
+
+// Actual value of root register is offset from the root array's start
+// to take advantage of negative displacement values.
+// TODO(sigurds): Choose best value.
+constexpr int kRootRegisterBias = 256;
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int reg;
+ const char* name;
+ };
+
+ static const int64_t kMaxValue = 0x7fffffffffffffffl;
+ static const int64_t kMinValue = 0x8000000000000000l;
+
+ private:
+ static const char* names_[kNumSimuRegisters];
+ static const RegisterAlias aliases_[];
+};
+
+// Helper functions for converting between register numbers and names.
+class FPURegisters {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int creg;
+ const char* name;
+ };
+
+ private:
+ static const char* names_[kNumFPURegisters];
+ static const RegisterAlias aliases_[];
+};
+
+// -----------------------------------------------------------------------------
+// Instructions encoding constants.
+
+// On LoongArch all instructions are 32 bits.
+using Instr = int32_t;
+
+// Special Software Interrupt codes when used in the presence of the LOONG64
+// simulator.
+enum SoftwareInterruptCodes {
+ // Transition to C code.
+ call_rt_redirected = 0x7fff
+};
+
+// On the LOONG64 simulator, breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints;
+//   the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
+
+// ----- Fields offset and length.
+const int kRjShift = 5;
+const int kRjBits = 5;
+const int kRkShift = 10;
+const int kRkBits = 5;
+const int kRdShift = 0;
+const int kRdBits = 5;
+const int kSaShift = 15;
+const int kSa2Bits = 2;
+const int kSa3Bits = 3;
+const int kCdShift = 0;
+const int kCdBits = 3;
+const int kCjShift = 5;
+const int kCjBits = 3;
+const int kCodeShift = 0;
+const int kCodeBits = 15;
+const int kCondShift = 15;
+const int kCondBits = 5;
+const int kUi5Shift = 10;
+const int kUi5Bits = 5;
+const int kUi6Shift = 10;
+const int kUi6Bits = 6;
+const int kUi12Shift = 10;
+const int kUi12Bits = 12;
+const int kSi12Shift = 10;
+const int kSi12Bits = 12;
+const int kSi14Shift = 10;
+const int kSi14Bits = 14;
+const int kSi16Shift = 10;
+const int kSi16Bits = 16;
+const int kSi20Shift = 5;
+const int kSi20Bits = 20;
+const int kMsbwShift = 16;
+const int kMsbwBits = 5;
+const int kLsbwShift = 10;
+const int kLsbwBits = 5;
+const int kMsbdShift = 16;
+const int kMsbdBits = 6;
+const int kLsbdShift = 10;
+const int kLsbdBits = 6;
+const int kFdShift = 0;
+const int kFdBits = 5;
+const int kFjShift = 5;
+const int kFjBits = 5;
+const int kFkShift = 10;
+const int kFkBits = 5;
+const int kFaShift = 15;
+const int kFaBits = 5;
+const int kCaShift = 15;
+const int kCaBits = 3;
+const int kHint15Shift = 0;
+const int kHint15Bits = 15;
+const int kHint5Shift = 0;
+const int kHint5Bits = 5;
+const int kOffsLowShift = 10;
+const int kOffsLowBits = 16;
+const int kOffs26HighShift = 0;
+const int kOffs26HighBits = 10;
+const int kOffs21HighShift = 0;
+const int kOffs21HighBits = 5;
+const int kImm12Shift = 0;
+const int kImm12Bits = 12;
+const int kImm16Shift = 0;
+const int kImm16Bits = 16;
+const int kImm26Shift = 0;
+const int kImm26Bits = 26;
+const int kImm28Shift = 0;
+const int kImm28Bits = 28;
+const int kImm32Shift = 0;
+const int kImm32Bits = 32;
+
+// ----- Miscellaneous useful masks.
+// Instruction bit masks.
+const int kRjFieldMask = ((1 << kRjBits) - 1) << kRjShift;
+const int kRkFieldMask = ((1 << kRkBits) - 1) << kRkShift;
+const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
+const int kSa2FieldMask = ((1 << kSa2Bits) - 1) << kSaShift;
+const int kSa3FieldMask = ((1 << kSa3Bits) - 1) << kSaShift;
+// Misc masks.
+const int kHiMaskOf32 = 0xffff << 16; // Only to be used with 32-bit values
+const int kLoMaskOf32 = 0xffff;
+const int kSignMaskOf32 = 0x80000000; // Only to be used with 32-bit values
+const int64_t kTop16MaskOf64 = (int64_t)0xffff << 48;
+const int64_t kHigher16MaskOf64 = (int64_t)0xffff << 32;
+const int64_t kUpper16MaskOf64 = (int64_t)0xffff << 16;
+
+const int kImm12Mask = ((1 << kImm12Bits) - 1) << kImm12Shift;
+const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
+const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
+const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
+
+// ----- LOONG64 Opcodes and Function Fields.
+enum Opcode : uint32_t {
+ BEQZ = 0x10U << 26,
+ BNEZ = 0x11U << 26,
+ BCZ = 0x12U << 26, // BCEQZ & BCNEZ
+ JIRL = 0x13U << 26,
+ B = 0x14U << 26,
+ BL = 0x15U << 26,
+ BEQ = 0x16U << 26,
+ BNE = 0x17U << 26,
+ BLT = 0x18U << 26,
+ BGE = 0x19U << 26,
+ BLTU = 0x1aU << 26,
+ BGEU = 0x1bU << 26,
+
+ ADDU16I_D = 0x4U << 26,
+
+ LU12I_W = 0xaU << 25,
+ LU32I_D = 0xbU << 25,
+ PCADDI = 0xcU << 25,
+ PCALAU12I = 0xdU << 25,
+ PCADDU12I = 0xeU << 25,
+ PCADDU18I = 0xfU << 25,
+
+ LL_W = 0x20U << 24,
+ SC_W = 0x21U << 24,
+ LL_D = 0x22U << 24,
+ SC_D = 0x23U << 24,
+ LDPTR_W = 0x24U << 24,
+ STPTR_W = 0x25U << 24,
+ LDPTR_D = 0x26U << 24,
+ STPTR_D = 0x27U << 24,
+
+ BSTR_W = 0x1U << 22, // BSTRINS_W & BSTRPICK_W
+ BSTRINS_W = BSTR_W,
+ BSTRPICK_W = BSTR_W,
+ BSTRINS_D = 0x2U << 22,
+ BSTRPICK_D = 0x3U << 22,
+
+ SLTI = 0x8U << 22,
+ SLTUI = 0x9U << 22,
+ ADDI_W = 0xaU << 22,
+ ADDI_D = 0xbU << 22,
+ LU52I_D = 0xcU << 22,
+ ANDI = 0xdU << 22,
+ ORI = 0xeU << 22,
+ XORI = 0xfU << 22,
+
+ LD_B = 0xa0U << 22,
+ LD_H = 0xa1U << 22,
+ LD_W = 0xa2U << 22,
+ LD_D = 0xa3U << 22,
+ ST_B = 0xa4U << 22,
+ ST_H = 0xa5U << 22,
+ ST_W = 0xa6U << 22,
+ ST_D = 0xa7U << 22,
+ LD_BU = 0xa8U << 22,
+ LD_HU = 0xa9U << 22,
+ LD_WU = 0xaaU << 22,
+ FLD_S = 0xacU << 22,
+ FST_S = 0xadU << 22,
+ FLD_D = 0xaeU << 22,
+ FST_D = 0xafU << 22,
+
+ FMADD_S = 0x81U << 20,
+ FMADD_D = 0x82U << 20,
+ FMSUB_S = 0x85U << 20,
+ FMSUB_D = 0x86U << 20,
+ FNMADD_S = 0x89U << 20,
+ FNMADD_D = 0x8aU << 20,
+ FNMSUB_S = 0x8dU << 20,
+ FNMSUB_D = 0x8eU << 20,
+ FCMP_COND_S = 0xc1U << 20,
+ FCMP_COND_D = 0xc2U << 20,
+
+ BYTEPICK_D = 0x3U << 18,
+ BYTEPICK_W = 0x2U << 18,
+
+ FSEL = 0x340U << 18,
+
+ ALSL = 0x1U << 18,
+ ALSL_W = ALSL,
+ ALSL_WU = ALSL,
+
+ ALSL_D = 0xbU << 18,
+
+ SLLI_W = 0x40U << 16,
+ SRLI_W = 0x44U << 16,
+ SRAI_W = 0x48U << 16,
+ ROTRI_W = 0x4cU << 16,
+
+ SLLI_D = 0x41U << 16,
+ SRLI_D = 0x45U << 16,
+ SRAI_D = 0x49U << 16,
+ ROTRI_D = 0x4dU << 16,
+
+ SLLI = 0x10U << 18,
+ SRLI = 0x11U << 18,
+ SRAI = 0x12U << 18,
+ ROTRI = 0x13U << 18,
+
+ ADD_W = 0x20U << 15,
+ ADD_D = 0x21U << 15,
+ SUB_W = 0x22U << 15,
+ SUB_D = 0x23U << 15,
+ SLT = 0x24U << 15,
+ SLTU = 0x25U << 15,
+ MASKNEZ = 0x26U << 15,
+ MASKEQZ = 0x27U << 15,
+ NOR = 0x28U << 15,
+ AND = 0x29U << 15,
+ OR = 0x2aU << 15,
+ XOR = 0x2bU << 15,
+ ORN = 0x2cU << 15,
+ ANDN = 0x2dU << 15,
+ SLL_W = 0x2eU << 15,
+ SRL_W = 0x2fU << 15,
+ SRA_W = 0x30U << 15,
+ SLL_D = 0x31U << 15,
+ SRL_D = 0x32U << 15,
+ SRA_D = 0x33U << 15,
+ ROTR_W = 0x36U << 15,
+ ROTR_D = 0x37U << 15,
+ MUL_W = 0x38U << 15,
+ MULH_W = 0x39U << 15,
+ MULH_WU = 0x3aU << 15,
+ MUL_D = 0x3bU << 15,
+ MULH_D = 0x3cU << 15,
+ MULH_DU = 0x3dU << 15,
+ MULW_D_W = 0x3eU << 15,
+ MULW_D_WU = 0x3fU << 15,
+
+ DIV_W = 0x40U << 15,
+ MOD_W = 0x41U << 15,
+ DIV_WU = 0x42U << 15,
+ MOD_WU = 0x43U << 15,
+ DIV_D = 0x44U << 15,
+ MOD_D = 0x45U << 15,
+ DIV_DU = 0x46U << 15,
+ MOD_DU = 0x47U << 15,
+
+ BREAK = 0x54U << 15,
+
+ FADD_S = 0x201U << 15,
+ FADD_D = 0x202U << 15,
+ FSUB_S = 0x205U << 15,
+ FSUB_D = 0x206U << 15,
+ FMUL_S = 0x209U << 15,
+ FMUL_D = 0x20aU << 15,
+ FDIV_S = 0x20dU << 15,
+ FDIV_D = 0x20eU << 15,
+ FMAX_S = 0x211U << 15,
+ FMAX_D = 0x212U << 15,
+ FMIN_S = 0x215U << 15,
+ FMIN_D = 0x216U << 15,
+ FMAXA_S = 0x219U << 15,
+ FMAXA_D = 0x21aU << 15,
+ FMINA_S = 0x21dU << 15,
+ FMINA_D = 0x21eU << 15,
+ FSCALEB_S = 0x221U << 15,
+ FSCALEB_D = 0x222U << 15,
+ FCOPYSIGN_S = 0x225U << 15,
+ FCOPYSIGN_D = 0x226U << 15,
+
+ LDX_B = 0x7000U << 15,
+ LDX_H = 0x7008U << 15,
+ LDX_W = 0x7010U << 15,
+ LDX_D = 0x7018U << 15,
+ STX_B = 0x7020U << 15,
+ STX_H = 0x7028U << 15,
+ STX_W = 0x7030U << 15,
+ STX_D = 0x7038U << 15,
+ LDX_BU = 0x7040U << 15,
+ LDX_HU = 0x7048U << 15,
+ LDX_WU = 0x7050U << 15,
+ FLDX_S = 0x7060U << 15,
+ FLDX_D = 0x7068U << 15,
+ FSTX_S = 0x7070U << 15,
+ FSTX_D = 0x7078U << 15,
+
+ AMSWAP_W = 0x70c0U << 15,
+ AMSWAP_D = 0x70c1U << 15,
+ AMADD_W = 0x70c2U << 15,
+ AMADD_D = 0x70c3U << 15,
+ AMAND_W = 0x70c4U << 15,
+ AMAND_D = 0x70c5U << 15,
+ AMOR_W = 0x70c6U << 15,
+ AMOR_D = 0x70c7U << 15,
+ AMXOR_W = 0x70c8U << 15,
+ AMXOR_D = 0x70c9U << 15,
+ AMMAX_W = 0x70caU << 15,
+ AMMAX_D = 0x70cbU << 15,
+ AMMIN_W = 0x70ccU << 15,
+ AMMIN_D = 0x70cdU << 15,
+ AMMAX_WU = 0x70ceU << 15,
+ AMMAX_DU = 0x70cfU << 15,
+ AMMIN_WU = 0x70d0U << 15,
+ AMMIN_DU = 0x70d1U << 15,
+ AMSWAP_DB_W = 0x70d2U << 15,
+ AMSWAP_DB_D = 0x70d3U << 15,
+ AMADD_DB_W = 0x70d4U << 15,
+ AMADD_DB_D = 0x70d5U << 15,
+ AMAND_DB_W = 0x70d6U << 15,
+ AMAND_DB_D = 0x70d7U << 15,
+ AMOR_DB_W = 0x70d8U << 15,
+ AMOR_DB_D = 0x70d9U << 15,
+ AMXOR_DB_W = 0x70daU << 15,
+ AMXOR_DB_D = 0x70dbU << 15,
+ AMMAX_DB_W = 0x70dcU << 15,
+ AMMAX_DB_D = 0x70ddU << 15,
+ AMMIN_DB_W = 0x70deU << 15,
+ AMMIN_DB_D = 0x70dfU << 15,
+ AMMAX_DB_WU = 0x70e0U << 15,
+ AMMAX_DB_DU = 0x70e1U << 15,
+ AMMIN_DB_WU = 0x70e2U << 15,
+ AMMIN_DB_DU = 0x70e3U << 15,
+
+ DBAR = 0x70e4U << 15,
+ IBAR = 0x70e5U << 15,
+
+ CLO_W = 0X4U << 10,
+ CLZ_W = 0X5U << 10,
+ CTO_W = 0X6U << 10,
+ CTZ_W = 0X7U << 10,
+ CLO_D = 0X8U << 10,
+ CLZ_D = 0X9U << 10,
+ CTO_D = 0XaU << 10,
+ CTZ_D = 0XbU << 10,
+ REVB_2H = 0XcU << 10,
+ REVB_4H = 0XdU << 10,
+ REVB_2W = 0XeU << 10,
+ REVB_D = 0XfU << 10,
+ REVH_2W = 0X10U << 10,
+ REVH_D = 0X11U << 10,
+ BITREV_4B = 0X12U << 10,
+ BITREV_8B = 0X13U << 10,
+ BITREV_W = 0X14U << 10,
+ BITREV_D = 0X15U << 10,
+ EXT_W_H = 0X16U << 10,
+ EXT_W_B = 0X17U << 10,
+
+ FABS_S = 0X4501U << 10,
+ FABS_D = 0X4502U << 10,
+ FNEG_S = 0X4505U << 10,
+ FNEG_D = 0X4506U << 10,
+ FLOGB_S = 0X4509U << 10,
+ FLOGB_D = 0X450aU << 10,
+ FCLASS_S = 0X450dU << 10,
+ FCLASS_D = 0X450eU << 10,
+ FSQRT_S = 0X4511U << 10,
+ FSQRT_D = 0X4512U << 10,
+ FRECIP_S = 0X4515U << 10,
+ FRECIP_D = 0X4516U << 10,
+ FRSQRT_S = 0X4519U << 10,
+ FRSQRT_D = 0X451aU << 10,
+ FMOV_S = 0X4525U << 10,
+ FMOV_D = 0X4526U << 10,
+ MOVGR2FR_W = 0X4529U << 10,
+ MOVGR2FR_D = 0X452aU << 10,
+ MOVGR2FRH_W = 0X452bU << 10,
+ MOVFR2GR_S = 0X452dU << 10,
+ MOVFR2GR_D = 0X452eU << 10,
+ MOVFRH2GR_S = 0X452fU << 10,
+ MOVGR2FCSR = 0X4530U << 10,
+ MOVFCSR2GR = 0X4532U << 10,
+ MOVFR2CF = 0X4534U << 10,
+ MOVGR2CF = 0X4536U << 10,
+
+ FCVT_S_D = 0x4646U << 10,
+ FCVT_D_S = 0x4649U << 10,
+ FTINTRM_W_S = 0x4681U << 10,
+ FTINTRM_W_D = 0x4682U << 10,
+ FTINTRM_L_S = 0x4689U << 10,
+ FTINTRM_L_D = 0x468aU << 10,
+ FTINTRP_W_S = 0x4691U << 10,
+ FTINTRP_W_D = 0x4692U << 10,
+ FTINTRP_L_S = 0x4699U << 10,
+ FTINTRP_L_D = 0x469aU << 10,
+ FTINTRZ_W_S = 0x46a1U << 10,
+ FTINTRZ_W_D = 0x46a2U << 10,
+ FTINTRZ_L_S = 0x46a9U << 10,
+ FTINTRZ_L_D = 0x46aaU << 10,
+ FTINTRNE_W_S = 0x46b1U << 10,
+ FTINTRNE_W_D = 0x46b2U << 10,
+ FTINTRNE_L_S = 0x46b9U << 10,
+ FTINTRNE_L_D = 0x46baU << 10,
+ FTINT_W_S = 0x46c1U << 10,
+ FTINT_W_D = 0x46c2U << 10,
+ FTINT_L_S = 0x46c9U << 10,
+ FTINT_L_D = 0x46caU << 10,
+ FFINT_S_W = 0x4744U << 10,
+ FFINT_S_L = 0x4746U << 10,
+ FFINT_D_W = 0x4748U << 10,
+ FFINT_D_L = 0x474aU << 10,
+ FRINT_S = 0x4791U << 10,
+ FRINT_D = 0x4792U << 10,
+
+ MOVCF2FR = 0x4535U << 10,
+ MOVCF2GR = 0x4537U << 10
+};
+
+// ----- Emulated conditions.
+// On LOONG64 we use this enum to abstract from conditional branch instructions.
+// The 'U' prefix is used to specify unsigned comparisons.
+enum Condition {
+ // Any value < 0 is considered no_condition.
+ kNoCondition = -1,
+ overflow = 0,
+ no_overflow = 1,
+ Uless = 2,
+ Ugreater_equal = 3,
+ Uless_equal = 4,
+ Ugreater = 5,
+ equal = 6,
+ not_equal = 7, // Unordered or Not Equal.
+ negative = 8,
+ positive = 9,
+ parity_even = 10,
+ parity_odd = 11,
+ less = 12,
+ greater_equal = 13,
+ less_equal = 14,
+ greater = 15,
+ ueq = 16, // Unordered or Equal.
+ ogl = 17, // Ordered and Not Equal.
+ cc_always = 18,
+
+ // Aliases.
+ carry = Uless,
+ not_carry = Ugreater_equal,
+ zero = equal,
+ eq = equal,
+ not_zero = not_equal,
+ ne = not_equal,
+ nz = not_equal,
+ sign = negative,
+ not_sign = positive,
+ mi = negative,
+ pl = positive,
+ hi = Ugreater,
+ ls = Uless_equal,
+ ge = greater_equal,
+ lt = less,
+ gt = greater,
+ le = less_equal,
+ hs = Ugreater_equal,
+ lo = Uless,
+ al = cc_always,
+ ult = Uless,
+ uge = Ugreater_equal,
+ ule = Uless_equal,
+ ugt = Ugreater,
+ cc_default = kNoCondition
+};
+
+// Returns the equivalent of !cc.
+// Negation of the default kNoCondition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
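+// For example, NegateCondition(eq) yields ne (6 ^ 1 == 7) and
+// NegateCondition(Uless) yields Ugreater_equal (2 ^ 1 == 3).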
+inline Condition NegateCondition(Condition cc) {
+ DCHECK(cc != cc_always);
+ return static_cast<Condition>(cc ^ 1);
+}
+
+inline Condition NegateFpuCondition(Condition cc) {
+ DCHECK(cc != cc_always);
+ switch (cc) {
+ case ult:
+ return ge;
+ case ugt:
+ return le;
+ case uge:
+ return lt;
+ case ule:
+ return gt;
+ case lt:
+ return uge;
+ case gt:
+ return ule;
+ case ge:
+ return ult;
+ case le:
+ return ugt;
+ case eq:
+ return ne;
+ case ne:
+ return eq;
+ case ueq:
+ return ogl;
+ case ogl:
+ return ueq;
+ default:
+ return cc;
+ }
+}
+
+// ----- Coprocessor conditions.
+enum FPUCondition {
+ kNoFPUCondition = -1,
+
+ CAF = 0x00, // False.
+ SAF = 0x01, // False.
+ CLT = 0x02, // Less Than quiet
+ // SLT = 0x03, // Less Than signaling
+ CEQ = 0x04,
+ SEQ = 0x05,
+ CLE = 0x06,
+ SLE = 0x07,
+ CUN = 0x08,
+ SUN = 0x09,
+ CULT = 0x0a,
+ SULT = 0x0b,
+ CUEQ = 0x0c,
+ SUEQ = 0x0d,
+ CULE = 0x0e,
+ SULE = 0x0f,
+ CNE = 0x10,
+ SNE = 0x11,
+ COR = 0x14,
+ SOR = 0x15,
+ CUNE = 0x18,
+ SUNE = 0x19,
+};
+
+const uint32_t kFPURoundingModeShift = 8;
+const uint32_t kFPURoundingModeMask = 0b11 << kFPURoundingModeShift;
+
+// FPU rounding modes.
+enum FPURoundingMode {
+ RN = 0b00 << kFPURoundingModeShift, // Round to Nearest.
+ RZ = 0b01 << kFPURoundingModeShift, // Round towards zero.
+ RP = 0b10 << kFPURoundingModeShift, // Round towards Plus Infinity.
+ RM = 0b11 << kFPURoundingModeShift, // Round towards Minus Infinity.
+
+ // Aliases.
+ kRoundToNearest = RN,
+ kRoundToZero = RZ,
+ kRoundToPlusInf = RP,
+ kRoundToMinusInf = RM,
+
+ mode_round = RN,
+ mode_ceil = RP,
+ mode_floor = RM,
+ mode_trunc = RZ
+};
+
+enum CheckForInexactConversion {
+ kCheckForInexactConversion,
+ kDontCheckForInexactConversion
+};
+
+enum class MaxMinKind : int { kMin = 0, kMax = 1 };
+
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on the LOONG64. They are defined so that they can
+// appear in shared function signatures, but will be ignored in LOONG64
+// implementations.
+enum Hint { no_hint = 0 };
+
+inline Hint NegateHint(Hint hint) { return no_hint; }
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-loong64.cc, as they use named
+// registers and other constants.
+
+// Break 0xfffff, reserved for redirected real time call.
+const Instr rtCallRedirInstr = BREAK | call_rt_redirected;
+// A nop instruction. (Encoding of addi_w 0 0 0).
+const Instr nopInstr = ADDI_W;
+
+constexpr uint8_t kInstrSize = 4;
+constexpr uint8_t kInstrSizeLog2 = 2;
+
+class InstructionBase {
+ public:
+ enum Type {
+ kOp6Type,
+ kOp7Type,
+ kOp8Type,
+ kOp10Type,
+ kOp12Type,
+ kOp14Type,
+ kOp17Type,
+ kOp22Type,
+ kUnsupported = -1
+ };
+
+ // Get the raw instruction bits.
+ inline Instr InstructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void SetInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
+
+ // Read a bit field out of the instruction bits.
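+ // For example, Bits(9, 5) returns the five-bit field occupying bits 9..5.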
+ inline int Bits(int hi, int lo) const {
+ return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
+ }
+
+ // Safe to call within InstructionType().
+ inline int RjFieldRawNoAssert() const {
+ return InstructionBits() & kRjFieldMask;
+ }
+
+ // Get the encoding type of the instruction.
+ inline Type InstructionType() const;
+
+ protected:
+ InstructionBase() {}
+};
+
+template <class T>
+class InstructionGetters : public T {
+ public:
+ inline int RjValue() const {
+ return this->Bits(kRjShift + kRjBits - 1, kRjShift);
+ }
+
+ inline int RkValue() const {
+ return this->Bits(kRkShift + kRkBits - 1, kRkShift);
+ }
+
+ inline int RdValue() const {
+ return this->Bits(kRdShift + kRdBits - 1, kRdShift);
+ }
+
+ inline int Sa2Value() const {
+ return this->Bits(kSaShift + kSa2Bits - 1, kSaShift);
+ }
+
+ inline int Sa3Value() const {
+ return this->Bits(kSaShift + kSa3Bits - 1, kSaShift);
+ }
+
+ inline int Ui5Value() const {
+ return this->Bits(kUi5Shift + kUi5Bits - 1, kUi5Shift);
+ }
+
+ inline int Ui6Value() const {
+ return this->Bits(kUi6Shift + kUi6Bits - 1, kUi6Shift);
+ }
+
+ inline int Ui12Value() const {
+ return this->Bits(kUi12Shift + kUi12Bits - 1, kUi12Shift);
+ }
+
+ inline int LsbwValue() const {
+ return this->Bits(kLsbwShift + kLsbwBits - 1, kLsbwShift);
+ }
+
+ inline int MsbwValue() const {
+ return this->Bits(kMsbwShift + kMsbwBits - 1, kMsbwShift);
+ }
+
+ inline int LsbdValue() const {
+ return this->Bits(kLsbdShift + kLsbdBits - 1, kLsbdShift);
+ }
+
+ inline int MsbdValue() const {
+ return this->Bits(kMsbdShift + kMsbdBits - 1, kMsbdShift);
+ }
+
+ inline int CondValue() const {
+ return this->Bits(kCondShift + kCondBits - 1, kCondShift);
+ }
+
+ inline int Si12Value() const {
+ return this->Bits(kSi12Shift + kSi12Bits - 1, kSi12Shift);
+ }
+
+ inline int Si14Value() const {
+ return this->Bits(kSi14Shift + kSi14Bits - 1, kSi14Shift);
+ }
+
+ inline int Si16Value() const {
+ return this->Bits(kSi16Shift + kSi16Bits - 1, kSi16Shift);
+ }
+
+ inline int Si20Value() const {
+ return this->Bits(kSi20Shift + kSi20Bits - 1, kSi20Shift);
+ }
+
+ inline int FdValue() const {
+ return this->Bits(kFdShift + kFdBits - 1, kFdShift);
+ }
+
+ inline int FaValue() const {
+ return this->Bits(kFaShift + kFaBits - 1, kFaShift);
+ }
+
+ inline int FjValue() const {
+ return this->Bits(kFjShift + kFjBits - 1, kFjShift);
+ }
+
+ inline int FkValue() const {
+ return this->Bits(kFkShift + kFkBits - 1, kFkShift);
+ }
+
+ inline int CjValue() const {
+ return this->Bits(kCjShift + kCjBits - 1, kCjShift);
+ }
+
+ inline int CdValue() const {
+ return this->Bits(kCdShift + kCdBits - 1, kCdShift);
+ }
+
+ inline int CaValue() const {
+ return this->Bits(kCaShift + kCaBits - 1, kCaShift);
+ }
+
+ inline int CodeValue() const {
+ return this->Bits(kCodeShift + kCodeBits - 1, kCodeShift);
+ }
+
+ inline int Hint5Value() const {
+ return this->Bits(kHint5Shift + kHint5Bits - 1, kHint5Shift);
+ }
+
+ inline int Hint15Value() const {
+ return this->Bits(kHint15Shift + kHint15Bits - 1, kHint15Shift);
+ }
+
+ inline int Offs16Value() const {
+ return this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift);
+ }
+
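+ // The 21-bit offset is split across the instruction: its low 16 bits occupy
+ // instruction bits [25:10] and its high 5 bits occupy bits [4:0].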
+ inline int Offs21Value() const {
+ int low = this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift);
+ int high =
+ this->Bits(kOffs21HighShift + kOffs21HighBits - 1, kOffs21HighShift);
+ return ((high << kOffsLowBits) + low);
+ }
+
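+ // The 26-bit offset is split the same way: low 16 bits in instruction bits
+ // [25:10], high 10 bits in bits [9:0].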
+ inline int Offs26Value() const {
+ int low = this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift);
+ int high =
+ this->Bits(kOffs26HighShift + kOffs26HighBits - 1, kOffs26HighShift);
+ return ((high << kOffsLowBits) + low);
+ }
+
+ inline int RjFieldRaw() const {
+ return this->InstructionBits() & kRjFieldMask;
+ }
+
+ inline int RkFieldRaw() const {
+ return this->InstructionBits() & kRkFieldMask;
+ }
+
+ inline int RdFieldRaw() const {
+ return this->InstructionBits() & kRdFieldMask;
+ }
+
+ inline int32_t ImmValue(int bits) const { return this->Bits(bits - 1, 0); }
+
+ /*TODO*/
+ inline int32_t Imm12Value() const { abort(); }
+
+ inline int32_t Imm14Value() const { abort(); }
+
+ inline int32_t Imm16Value() const { abort(); }
+
+ // Say if the instruction is a break.
+ bool IsTrap() const;
+};
+
+class Instruction : public InstructionGetters<InstructionBase> {
+ public:
+ // Instructions are read out of a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instruction.
+ // Use the At(pc) function to create references to Instruction.
+ static Instruction* At(byte* pc) {
+ return reinterpret_cast<Instruction*>(pc);
+ }
+
+ private:
+ // We need to prevent the creation of instances of class Instruction.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
+};
+
+// -----------------------------------------------------------------------------
+// LOONG64 assembly various constants.
+
+const int kInvalidStackOffset = -1;
+
+static const int kNegOffset = 0x00008000;
+
+InstructionBase::Type InstructionBase::InstructionType() const {
+ InstructionBase::Type kType = kUnsupported;
+
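+ // Probe the major opcode at successively wider field widths, from the 6-bit
+ // opcode group up to the 22-bit group, until one matches.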
+ // Check for kOp6Type
+ switch (Bits(31, 26) << 26) {
+ case ADDU16I_D:
+ case BEQZ:
+ case BNEZ:
+ case BCZ:
+ case JIRL:
+ case B:
+ case BL:
+ case BEQ:
+ case BNE:
+ case BLT:
+ case BGE:
+ case BLTU:
+ case BGEU:
+ kType = kOp6Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp7Type
+ switch (Bits(31, 25) << 25) {
+ case LU12I_W:
+ case LU32I_D:
+ case PCADDI:
+ case PCALAU12I:
+ case PCADDU12I:
+ case PCADDU18I:
+ kType = kOp7Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp8Type
+ switch (Bits(31, 24) << 24) {
+ case LDPTR_W:
+ case STPTR_W:
+ case LDPTR_D:
+ case STPTR_D:
+ case LL_W:
+ case SC_W:
+ case LL_D:
+ case SC_D:
+ kType = kOp8Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp10Type
+ switch (Bits(31, 22) << 22) {
+ case BSTR_W: {
+ // If Bit(21) = 0, then the Opcode is not BSTR_W.
+ if (Bit(21) == 0)
+ kType = kUnsupported;
+ else
+ kType = kOp10Type;
+ break;
+ }
+ case BSTRINS_D:
+ case BSTRPICK_D:
+ case SLTI:
+ case SLTUI:
+ case ADDI_W:
+ case ADDI_D:
+ case LU52I_D:
+ case ANDI:
+ case ORI:
+ case XORI:
+ case LD_B:
+ case LD_H:
+ case LD_W:
+ case LD_D:
+ case ST_B:
+ case ST_H:
+ case ST_W:
+ case ST_D:
+ case LD_BU:
+ case LD_HU:
+ case LD_WU:
+ case FLD_S:
+ case FST_S:
+ case FLD_D:
+ case FST_D:
+ kType = kOp10Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp12Type
+ switch (Bits(31, 20) << 20) {
+ case FMADD_S:
+ case FMADD_D:
+ case FMSUB_S:
+ case FMSUB_D:
+ case FNMADD_S:
+ case FNMADD_D:
+ case FNMSUB_S:
+ case FNMSUB_D:
+ case FCMP_COND_S:
+ case FCMP_COND_D:
+ case FSEL:
+ kType = kOp12Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp14Type
+ switch (Bits(31, 18) << 18) {
+ case ALSL:
+ case BYTEPICK_W:
+ case BYTEPICK_D:
+ case ALSL_D:
+ case SLLI:
+ case SRLI:
+ case SRAI:
+ case ROTRI:
+ kType = kOp14Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp17Type
+ switch (Bits(31, 15) << 15) {
+ case ADD_W:
+ case ADD_D:
+ case SUB_W:
+ case SUB_D:
+ case SLT:
+ case SLTU:
+ case MASKEQZ:
+ case MASKNEZ:
+ case NOR:
+ case AND:
+ case OR:
+ case XOR:
+ case ORN:
+ case ANDN:
+ case SLL_W:
+ case SRL_W:
+ case SRA_W:
+ case SLL_D:
+ case SRL_D:
+ case SRA_D:
+ case ROTR_D:
+ case ROTR_W:
+ case MUL_W:
+ case MULH_W:
+ case MULH_WU:
+ case MUL_D:
+ case MULH_D:
+ case MULH_DU:
+ case MULW_D_W:
+ case MULW_D_WU:
+ case DIV_W:
+ case MOD_W:
+ case DIV_WU:
+ case MOD_WU:
+ case DIV_D:
+ case MOD_D:
+ case DIV_DU:
+ case MOD_DU:
+ case BREAK:
+ case FADD_S:
+ case FADD_D:
+ case FSUB_S:
+ case FSUB_D:
+ case FMUL_S:
+ case FMUL_D:
+ case FDIV_S:
+ case FDIV_D:
+ case FMAX_S:
+ case FMAX_D:
+ case FMIN_S:
+ case FMIN_D:
+ case FMAXA_S:
+ case FMAXA_D:
+ case FMINA_S:
+ case FMINA_D:
+ case LDX_B:
+ case LDX_H:
+ case LDX_W:
+ case LDX_D:
+ case STX_B:
+ case STX_H:
+ case STX_W:
+ case STX_D:
+ case LDX_BU:
+ case LDX_HU:
+ case LDX_WU:
+ case FLDX_S:
+ case FLDX_D:
+ case FSTX_S:
+ case FSTX_D:
+ case AMSWAP_W:
+ case AMSWAP_D:
+ case AMADD_W:
+ case AMADD_D:
+ case AMAND_W:
+ case AMAND_D:
+ case AMOR_W:
+ case AMOR_D:
+ case AMXOR_W:
+ case AMXOR_D:
+ case AMMAX_W:
+ case AMMAX_D:
+ case AMMIN_W:
+ case AMMIN_D:
+ case AMMAX_WU:
+ case AMMAX_DU:
+ case AMMIN_WU:
+ case AMMIN_DU:
+ case AMSWAP_DB_W:
+ case AMSWAP_DB_D:
+ case AMADD_DB_W:
+ case AMADD_DB_D:
+ case AMAND_DB_W:
+ case AMAND_DB_D:
+ case AMOR_DB_W:
+ case AMOR_DB_D:
+ case AMXOR_DB_W:
+ case AMXOR_DB_D:
+ case AMMAX_DB_W:
+ case AMMAX_DB_D:
+ case AMMIN_DB_W:
+ case AMMIN_DB_D:
+ case AMMAX_DB_WU:
+ case AMMAX_DB_DU:
+ case AMMIN_DB_WU:
+ case AMMIN_DB_DU:
+ case DBAR:
+ case IBAR:
+ case FSCALEB_S:
+ case FSCALEB_D:
+ case FCOPYSIGN_S:
+ case FCOPYSIGN_D:
+ kType = kOp17Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp22Type
+ switch (Bits(31, 10) << 10) {
+ case CLZ_W:
+ case CTZ_W:
+ case CLZ_D:
+ case CTZ_D:
+ case REVB_2H:
+ case REVB_4H:
+ case REVB_2W:
+ case REVB_D:
+ case REVH_2W:
+ case REVH_D:
+ case BITREV_4B:
+ case BITREV_8B:
+ case BITREV_W:
+ case BITREV_D:
+ case EXT_W_B:
+ case EXT_W_H:
+ case FABS_S:
+ case FABS_D:
+ case FNEG_S:
+ case FNEG_D:
+ case FSQRT_S:
+ case FSQRT_D:
+ case FMOV_S:
+ case FMOV_D:
+ case MOVGR2FR_W:
+ case MOVGR2FR_D:
+ case MOVGR2FRH_W:
+ case MOVFR2GR_S:
+ case MOVFR2GR_D:
+ case MOVFRH2GR_S:
+ case MOVGR2FCSR:
+ case MOVFCSR2GR:
+ case FCVT_S_D:
+ case FCVT_D_S:
+ case FTINTRM_W_S:
+ case FTINTRM_W_D:
+ case FTINTRM_L_S:
+ case FTINTRM_L_D:
+ case FTINTRP_W_S:
+ case FTINTRP_W_D:
+ case FTINTRP_L_S:
+ case FTINTRP_L_D:
+ case FTINTRZ_W_S:
+ case FTINTRZ_W_D:
+ case FTINTRZ_L_S:
+ case FTINTRZ_L_D:
+ case FTINTRNE_W_S:
+ case FTINTRNE_W_D:
+ case FTINTRNE_L_S:
+ case FTINTRNE_L_D:
+ case FTINT_W_S:
+ case FTINT_W_D:
+ case FTINT_L_S:
+ case FTINT_L_D:
+ case FFINT_S_W:
+ case FFINT_S_L:
+ case FFINT_D_W:
+ case FFINT_D_L:
+ case FRINT_S:
+ case FRINT_D:
+ case MOVFR2CF:
+ case MOVCF2FR:
+ case MOVGR2CF:
+ case MOVCF2GR:
+ case FRECIP_S:
+ case FRECIP_D:
+ case FRSQRT_S:
+ case FRSQRT_D:
+ case FCLASS_S:
+ case FCLASS_D:
+ case FLOGB_S:
+ case FLOGB_D:
+ case CLO_W:
+ case CTO_W:
+ case CLO_D:
+ case CTO_D:
+ kType = kOp22Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ return kType;
+}
+
+// -----------------------------------------------------------------------------
+// Instructions.
+
+template <class P>
+bool InstructionGetters<P>::IsTrap() const {
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_
diff --git a/deps/v8/src/codegen/loong64/cpu-loong64.cc b/deps/v8/src/codegen/loong64/cpu-loong64.cc
new file mode 100644
index 0000000000..6b4040676d
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/cpu-loong64.cc
@@ -0,0 +1,38 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CPU specific code for LoongArch independent of OS goes here.
+
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/codegen/cpu-features.h"
+
+namespace v8 {
+namespace internal {
+
+void CpuFeatures::FlushICache(void* start, size_t size) {
+#if defined(V8_HOST_ARCH_LOONG64)
+ // Nothing to do, flushing no instructions.
+ if (size == 0) {
+ return;
+ }
+
+#if defined(ANDROID) && !defined(__LP64__)
+ // Bionic cacheflush can typically run in userland, avoiding a kernel call.
+ char* end = reinterpret_cast<char*>(start) + size;
+ cacheflush(reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end),
+ 0);
+#else // ANDROID
+ asm("ibar 0\n");
+#endif // ANDROID
+#endif // V8_HOST_ARCH_LOONG64
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h b/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
new file mode 100644
index 0000000000..7947c97dc3
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
@@ -0,0 +1,278 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_INTERFACE_DESCRIPTORS_LOONG64_INL_H_
+#define V8_CODEGEN_LOONG64_INTERFACE_DESCRIPTORS_LOONG64_INL_H_
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(a0, a1, a2, a3, a4);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+#if DEBUG
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::
+ VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
+ RegList allocatable_regs = data->allocatable_registers();
+ if (argc >= 1) DCHECK(allocatable_regs | a0.bit());
+ if (argc >= 2) DCHECK(allocatable_regs | a1.bit());
+ if (argc >= 3) DCHECK(allocatable_regs | a2.bit());
+ if (argc >= 4) DCHECK(allocatable_regs | a3.bit());
+ if (argc >= 5) DCHECK(allocatable_regs | a4.bit());
+ if (argc >= 6) DCHECK(allocatable_regs | a5.bit());
+ if (argc >= 7) DCHECK(allocatable_regs | a6.bit());
+ if (argc >= 8) DCHECK(allocatable_regs | a7.bit());
+ // Additional arguments are passed on the stack.
+}
+#endif // DEBUG
+
+// static
+constexpr auto WriteBarrierDescriptor::registers() {
+ return RegisterArray(a1, a5, a4, a2, a0, a3);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == a0);
+ return RegisterArray(a0, a1, a2, a3, cp);
+}
+
+// static
+constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == a0);
+ return RegisterArray(a0, a1, a2, a3, cp);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return a0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return a4;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return a0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return a4; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return a5; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return a2;
+}
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return a3; }
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // a1: target
+ // a0: number of arguments
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a0, a4, a2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // a1: the target to call
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // a1 : function template info
+ // a0 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : the object to spread
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a2 : the arguments list
+ return RegisterArray(a1, a2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a3, a0, a4, a2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // a1: the target to call
+ // a3: new target
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the object to spread
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the arguments list
+ return RegisterArray(a1, a3, a2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); }
+
+// static
+constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); }
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(a1, a0); }
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ // a1 : kApiFunctionAddress
+ // a2 : kArgc
+ // a3 : kCallData
+ // a0 : kHolder
+ return RegisterArray(a1, a2, a3, a0);
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ // a0 : argument count (not including receiver)
+ // a2 : address of first argument
+ // a1 : the target callable to be called
+ return RegisterArray(a0, a2, a1);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ // a0 : argument count (not including receiver)
+ // a4 : address of the first argument
+ // a1 : constructor to call
+ // a3 : new target
+ // a2 : allocation site feedback if available, undefined otherwise
+ return RegisterArray(a0, a4, a1, a3, a2);
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ // a0 : the value to pass to the generator
+ // a1 : the JSGeneratorObject to resume
+ return RegisterArray(a0, a1);
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(a0, a1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
+
+#endif // V8_CODEGEN_LOONG64_INTERFACE_DESCRIPTORS_LOONG64_INL_H_
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
new file mode 100644
index 0000000000..b999c1166b
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
@@ -0,0 +1,4107 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h> // For LONG_MIN, LONG_MAX.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frames-inl.h"
+#include "src/heap/memory-chunk.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
+#include "src/objects/heap-number.h"
+#include "src/runtime/runtime.h"
+#include "src/snapshot/snapshot.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
+// Satisfy cpplint check, but don't include platform-specific header. It is
+// included recursively via macro-assembler.h.
+#if 0
+#include "src/codegen/loong64/macro-assembler-loong64.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+static inline bool IsZero(const Operand& rk) {
+ if (rk.is_reg()) {
+ return rk.rm() == zero_reg;
+ } else {
+ return rk.immediate() == 0;
+ }
+}
+
+int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) const {
+ int bytes = 0;
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ bytes += NumRegs(list) * kPointerSize;
+
+ if (fp_mode == SaveFPRegsMode::kSave) {
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ }
+
+ return bytes;
+}
+
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPush(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ if (fp_mode == SaveFPRegsMode::kSave) {
+ MultiPushFPU(kCallerSavedFPU);
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ }
+
+ return bytes;
+}
+
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
+ if (fp_mode == SaveFPRegsMode::kSave) {
+ MultiPopFPU(kCallerSavedFPU);
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ }
+
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPop(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ return bytes;
+}
+
+void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+ Ld_d(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
+}
+
+void TurboAssembler::PushCommonFrame(Register marker_reg) {
+ if (marker_reg.is_valid()) {
+ Push(ra, fp, marker_reg);
+ Add_d(fp, sp, Operand(kPointerSize));
+ } else {
+ Push(ra, fp);
+ mov(fp, sp);
+ }
+}
+
+void TurboAssembler::PushStandardFrame(Register function_reg) {
+ int offset = -StandardFrameConstants::kContextOffset;
+ if (function_reg.is_valid()) {
+ Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister);
+ offset += 2 * kPointerSize;
+ } else {
+ Push(ra, fp, cp, kJavaScriptCallArgCountRegister);
+ offset += kPointerSize;
+ }
+ Add_d(fp, sp, Operand(offset));
+}
+
+// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
+// The register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWriteField(Register object, int offset,
+ Register value, RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ if (smi_check == SmiCheck::kInline) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Although the object register is tagged, the offset is relative to the start
+ // of the object, so the offset must be a multiple of kPointerSize.
+ DCHECK(IsAligned(offset, kPointerSize));
+
+ if (FLAG_debug_code) {
+ Label ok;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add_d(scratch, object, offset - kHeapObjectTag);
+ And(scratch, scratch, Operand(kPointerSize - 1));
+ Branch(&ok, eq, scratch, Operand(zero_reg));
+ Abort(AbortReason::kUnalignedCellInWriteBarrier);
+ bind(&ok);
+ }
+
+ RecordWrite(object, Operand(offset - kHeapObjectTag), value, ra_status,
+ save_fp, remembered_set_action, SmiCheck::kOmit);
+
+ bind(&done);
+}
+
+void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+ if (registers == 0) return;
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ MultiPush(regs);
+}
+
+void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+ if (registers == 0) return;
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ MultiPop(regs);
+}
+
+void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
+ SaveFPRegsMode fp_mode) {
+ RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
+ MaybeSaveRegisters(registers);
+
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
+
+ MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
+
+ Call(isolate()->builtins()->code_handle(
+ Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
+ RelocInfo::CODE_TARGET);
+ MaybeRestoreRegisters(registers);
+}
+
+void TurboAssembler::CallRecordWriteStubSaveRegisters(
+ Register object, Operand offset, RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, StubCallMode mode) {
+ ASM_CODE_COMMENT(this);
+ RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
+ MaybeSaveRegisters(registers);
+
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
+
+ MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
+
+ CallRecordWriteStub(object_parameter, slot_address_parameter,
+ remembered_set_action, fp_mode, mode);
+
+ MaybeRestoreRegisters(registers);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode) {
+ // Use CallRecordWriteStubSaveRegisters if the object and slot registers
+ // need to be caller saved.
+ DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
+ DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
+#if V8_ENABLE_WEBASSEMBLY
+ if (mode == StubCallMode::kCallWasmRuntimeStub) {
+ auto wasm_target =
+ wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
+ Call(wasm_target, RelocInfo::WASM_STUB_CALL);
+#else
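+ // When WebAssembly support is compiled out, this never-taken branch keeps
+ // the following `} else {` well-formed.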
+ if (false) {
+#endif
+ } else {
+ auto builtin = Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
+ if (options().inline_offheap_trampolines) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Call(scratch);
+ } else {
+ Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
+ }
+}
+
+void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
+ Register object, Operand offset) {
+ DCHECK_NE(dst_object, dst_slot);
+ // If `offset` is a register, it cannot overlap with `object`.
+ DCHECK_IMPLIES(!offset.IsImmediate(), offset.rm() != object);
+
+ // If the slot register does not overlap with the object register, we can
+ // overwrite it.
+ if (dst_slot != object) {
+ Add_d(dst_slot, object, offset);
+ mov(dst_object, object);
+ return;
+ }
+
+ DCHECK_EQ(dst_slot, object);
+
+ // If the destination object register does not overlap with the offset
+ // register, we can overwrite it.
+ if (offset.IsImmediate() || (offset.rm() != dst_object)) {
+ mov(dst_object, dst_slot);
+ Add_d(dst_slot, dst_slot, offset);
+ return;
+ }
+
+ DCHECK_EQ(dst_object, offset.rm());
+
+ // We only have `dst_slot` and `dst_object` left as distinct registers so we
+ // have to swap them. We write this as an add+sub sequence to avoid using a
+ // scratch register.
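+ // dst_slot = object + offset, then dst_object = (object + offset) - offset,
+ // i.e. the original object pointer.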
+ Add_d(dst_slot, dst_slot, dst_object);
+ Sub_d(dst_object, dst_slot, dst_object);
+}
+
+// If ra_status is kRAHasBeenSaved, ra will be clobbered.
+// TODO(LOONG_dev): LOONG64 Check this comment
+// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
+// The register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object, Operand offset,
+ Register value, RAStatus ra_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ DCHECK(!AreAliased(object, value));
+
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add_d(scratch, object, offset);
+ Ld_d(scratch, MemOperand(scratch, 0));
+ Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
+ Operand(value));
+ }
+
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
+ !FLAG_incremental_marking) ||
+ FLAG_disable_write_barriers) {
+ return;
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ if (smi_check == SmiCheck::kInline) {
+ DCHECK_EQ(0, kSmiTag);
+ JumpIfSmi(value, &done);
+ }
+
+ CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &done);
+
+ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, eq,
+ &done);
+
+ // Record the actual write.
+ if (ra_status == kRAHasNotBeenSaved) {
+ Push(ra);
+ }
+
+ Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
+ DCHECK(!AreAliased(object, slot_address, value));
+ DCHECK(offset.IsImmediate());
+ Add_d(slot_address, object, offset);
+ CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
+ if (ra_status == kRAHasNotBeenSaved) {
+ Pop(ra);
+ }
+
+ bind(&done);
+}
+
+// ---------------------------------------------------------------------------
+// Instruction macros.
+
+void TurboAssembler::Add_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ add_w(rd, rj, rk.rm());
+ } else {
+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ addi_w(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ add_w(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Add_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ add_d(rd, rj, rk.rm());
+ } else {
+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ addi_d(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ add_d(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Sub_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sub_w(rd, rj, rk.rm());
+ } else {
+ DCHECK(is_int32(rk.immediate()));
+ if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) {
+ // No subi_w instr, use addi_w(x, y, -imm).
+ addi_w(rd, rj, static_cast<int32_t>(-rk.immediate()));
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ if (-rk.immediate() >> 12 == 0 && !MustUseReg(rk.rmode())) {
+ // Use load -imm and add_w when loading -imm generates one instruction.
+ li(scratch, -rk.immediate());
+ add_w(rd, rj, scratch);
+ } else {
+ // li handles the relocation.
+ li(scratch, rk);
+ sub_w(rd, rj, scratch);
+ }
+ }
+ }
+}
+
+void TurboAssembler::Sub_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sub_d(rd, rj, rk.rm());
+ } else if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) {
+ // No subi_d instr, use addi_d(x, y, -imm).
+ addi_d(rd, rj, static_cast<int32_t>(-rk.immediate()));
+ } else {
+ DCHECK(rj != t7);
+ int li_count = InstrCountForLi64Bit(rk.immediate());
+ int li_neg_count = InstrCountForLi64Bit(-rk.immediate());
+ if (li_neg_count < li_count && !MustUseReg(rk.rmode())) {
+ // Use load -imm and add_d when loading -imm generates one instruction.
+ DCHECK(rk.immediate() != std::numeric_limits<int32_t>::min());
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(-rk.immediate()));
+ add_d(rd, rj, scratch);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rk);
+ sub_d(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Mul_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mul_w(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mul_w(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mulh_w(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mulh_w(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mulh_wu(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mulh_wu(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mul_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mul_d(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mul_d(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mulh_d(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mulh_d(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Div_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ div_w(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ div_w(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mod_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mod_w(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mod_w(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mod_wu(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mod_wu(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Div_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ div_d(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ div_d(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Div_wu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ div_wu(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ div_wu(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Div_du(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ div_du(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ div_du(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mod_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mod_d(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mod_d(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mod_du(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mod_du(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mod_du(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::And(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ and_(rd, rj, rk.rm());
+ } else {
+ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ andi(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ and_(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Or(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ or_(rd, rj, rk.rm());
+ } else {
+ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ ori(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ or_(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Xor(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ xor_(rd, rj, rk.rm());
+ } else {
+ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ xori(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ xor_(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Nor(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ nor(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ nor(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Andn(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ andn(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ andn(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Orn(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ orn(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ orn(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Neg(Register rj, const Operand& rk) {
+ DCHECK(rk.is_reg());
+ sub_d(rj, zero_reg, rk.rm());
+}
+
+void TurboAssembler::Slt(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ slt(rd, rj, rk.rm());
+ } else {
+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ slti(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ slt(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Sltu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sltu(rd, rj, rk.rm());
+ } else {
+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ sltui(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ sltu(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Sle(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ slt(rd, rk.rm(), rj);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ slt(rd, scratch, rj);
+ }
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sleu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sltu(rd, rk.rm(), rj);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ sltu(rd, scratch, rj);
+ }
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sge(Register rd, Register rj, const Operand& rk) {
+ Slt(rd, rj, rk);
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sgeu(Register rd, Register rj, const Operand& rk) {
+ Sltu(rd, rj, rk);
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sgt(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ slt(rd, rk.rm(), rj);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ slt(rd, scratch, rj);
+ }
+}
+
+void TurboAssembler::Sgtu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sltu(rd, rk.rm(), rj);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ sltu(rd, scratch, rj);
+ }
+}
+
+void TurboAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ rotr_w(rd, rj, rk.rm());
+ } else {
+ int64_t ror_value = rk.immediate() % 32;
+ if (ror_value < 0) {
+ ror_value += 32;
+ }
+ rotri_w(rd, rj, ror_value);
+ }
+}
+
+void TurboAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ rotr_d(rd, rj, rk.rm());
+ } else {
+ int64_t dror_value = rk.immediate() % 64;
+ if (dror_value < 0) dror_value += 64;
+ rotri_d(rd, rj, dror_value);
+ }
+}
+
+void TurboAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa,
+ Register scratch) {
+ DCHECK(sa >= 1 && sa <= 31);
+ if (sa <= 4) {
+ alsl_w(rd, rj, rk, sa);
+ } else {
+ Register tmp = rd == rk ? scratch : rd;
+ DCHECK(tmp != rk);
+ slli_w(tmp, rj, sa);
+ add_w(rd, rk, tmp);
+ }
+}
+
+void TurboAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa,
+ Register scratch) {
+  DCHECK(sa >= 1 && sa <= 63);
+ if (sa <= 4) {
+ alsl_d(rd, rj, rk, sa);
+ } else {
+ Register tmp = rd == rk ? scratch : rd;
+ DCHECK(tmp != rk);
+ slli_d(tmp, rj, sa);
+ add_d(rd, rk, tmp);
+ }
+}
+
+// ------------Pseudo-instructions-------------
+
+// Change endianness
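+// The sequences below rely on the revb family (as understood from the ISA
+// description, so treat the exact semantics as an assumption): revb_2h
+// reverses the bytes inside each 16-bit half-word, revb_2w inside each
+// 32-bit word, and revb_d reverses all eight bytes. ext_w_h and
+// slli_w(dest, dest, 0) then re-sign-extend the 16- or 32-bit result,
+// while ByteSwapUnsigned clears the upper bits with bstrins_d instead.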
+void TurboAssembler::ByteSwapSigned(Register dest, Register src,
+ int operand_size) {
+ DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8);
+ if (operand_size == 2) {
+ revb_2h(dest, src);
+ ext_w_h(dest, dest);
+ } else if (operand_size == 4) {
+ revb_2w(dest, src);
+ slli_w(dest, dest, 0);
+ } else {
+    revb_d(dest, src);
+ }
+}
+
+void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
+ int operand_size) {
+ DCHECK(operand_size == 2 || operand_size == 4);
+ if (operand_size == 2) {
+ revb_2h(dest, src);
+ bstrins_d(dest, zero_reg, 63, 16);
+ } else {
+ revb_2w(dest, src);
+ bstrins_d(dest, zero_reg, 63, 32);
+ }
+}
+
+void TurboAssembler::Ld_b(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_b(rd, source.base(), source.index());
+ } else {
+ ld_b(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_bu(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_bu(rd, source.base(), source.index());
+ } else {
+ ld_bu(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::St_b(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ stx_b(rd, source.base(), source.index());
+ } else {
+ st_b(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_h(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_h(rd, source.base(), source.index());
+ } else {
+ ld_h(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_hu(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_hu(rd, source.base(), source.index());
+ } else {
+ ld_hu(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::St_h(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ stx_h(rd, source.base(), source.index());
+ } else {
+ st_h(rd, source.base(), source.offset());
+ }
+}
+
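+// For word and doubleword accesses, Ld_w/St_w and Ld_d/St_d first try the
+// ldptr/stptr forms, whose offset must fit in 16 bits and be 4-byte
+// aligned (hence the is_int16 and low-two-bit checks below); this covers
+// a wider range than the plain ld/st immediates without touching a
+// scratch register.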
+void TurboAssembler::Ld_w(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+
+ if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
+ (source.offset() & 0b11) == 0) {
+ ldptr_w(rd, source.base(), source.offset());
+ return;
+ }
+
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_w(rd, source.base(), source.index());
+ } else {
+ ld_w(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_wu(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_wu(rd, source.base(), source.index());
+ } else {
+ ld_wu(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::St_w(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+
+ if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
+ (source.offset() & 0b11) == 0) {
+ stptr_w(rd, source.base(), source.offset());
+ return;
+ }
+
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ stx_w(rd, source.base(), source.index());
+ } else {
+ st_w(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_d(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+
+ if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
+ (source.offset() & 0b11) == 0) {
+ ldptr_d(rd, source.base(), source.offset());
+ return;
+ }
+
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_d(rd, source.base(), source.index());
+ } else {
+ ld_d(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::St_d(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+
+ if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
+ (source.offset() & 0b11) == 0) {
+ stptr_d(rd, source.base(), source.offset());
+ return;
+ }
+
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ stx_d(rd, source.base(), source.index());
+ } else {
+ st_d(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Fld_s(FPURegister fd, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(&tmp);
+ if (tmp.hasIndexReg()) {
+ fldx_s(fd, tmp.base(), tmp.index());
+ } else {
+ fld_s(fd, tmp.base(), tmp.offset());
+ }
+}
+
+void TurboAssembler::Fst_s(FPURegister fs, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(&tmp);
+ if (tmp.hasIndexReg()) {
+ fstx_s(fs, tmp.base(), tmp.index());
+ } else {
+ fst_s(fs, tmp.base(), tmp.offset());
+ }
+}
+
+void TurboAssembler::Fld_d(FPURegister fd, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(&tmp);
+ if (tmp.hasIndexReg()) {
+ fldx_d(fd, tmp.base(), tmp.index());
+ } else {
+ fld_d(fd, tmp.base(), tmp.offset());
+ }
+}
+
+void TurboAssembler::Fst_d(FPURegister fs, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(&tmp);
+ if (tmp.hasIndexReg()) {
+ fstx_d(fs, tmp.base(), tmp.index());
+ } else {
+ fst_d(fs, tmp.base(), tmp.offset());
+ }
+}
+
+void TurboAssembler::Ll_w(Register rd, const MemOperand& rj) {
+ DCHECK(!rj.hasIndexReg());
+ bool is_one_instruction = is_int14(rj.offset());
+ if (is_one_instruction) {
+ ll_w(rd, rj.base(), rj.offset());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rj.offset());
+ add_d(scratch, scratch, rj.base());
+ ll_w(rd, scratch, 0);
+ }
+}
+
+void TurboAssembler::Ll_d(Register rd, const MemOperand& rj) {
+ DCHECK(!rj.hasIndexReg());
+ bool is_one_instruction = is_int14(rj.offset());
+ if (is_one_instruction) {
+ ll_d(rd, rj.base(), rj.offset());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rj.offset());
+ add_d(scratch, scratch, rj.base());
+ ll_d(rd, scratch, 0);
+ }
+}
+
+void TurboAssembler::Sc_w(Register rd, const MemOperand& rj) {
+ DCHECK(!rj.hasIndexReg());
+ bool is_one_instruction = is_int14(rj.offset());
+ if (is_one_instruction) {
+ sc_w(rd, rj.base(), rj.offset());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rj.offset());
+ add_d(scratch, scratch, rj.base());
+ sc_w(rd, scratch, 0);
+ }
+}
+
+void TurboAssembler::Sc_d(Register rd, const MemOperand& rj) {
+ DCHECK(!rj.hasIndexReg());
+ bool is_one_instruction = is_int14(rj.offset());
+ if (is_one_instruction) {
+ sc_d(rd, rj.base(), rj.offset());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rj.offset());
+ add_d(scratch, scratch, rj.base());
+ sc_d(rd, scratch, 0);
+ }
+}
+
+void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
+ // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+ // non-isolate-independent code. In many cases it might be cheaper than
+ // embedding the relocatable value.
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(dst, value);
+ return;
+ }
+ li(dst, Operand(value), mode);
+}
+
+void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
+ // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+ // non-isolate-independent code. In many cases it might be cheaper than
+ // embedding the relocatable value.
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(dst, value);
+ return;
+ }
+ li(dst, Operand(value), mode);
+}
+
+void TurboAssembler::li(Register dst, const StringConstantBase* string,
+ LiFlags mode) {
+ li(dst, Operand::EmbeddedStringConstant(string), mode);
+}
+
+static inline int InstrCountForLiLower32Bit(int64_t value) {
+ if (is_int12(static_cast<int32_t>(value)) ||
+ is_uint12(static_cast<int32_t>(value)) || !(value & kImm12Mask)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) {
+ if (is_int12(static_cast<int32_t>(j.immediate()))) {
+ addi_d(rd, zero_reg, j.immediate());
+ } else if (is_uint12(static_cast<int32_t>(j.immediate()))) {
+ ori(rd, zero_reg, j.immediate() & kImm12Mask);
+ } else {
+ lu12i_w(rd, j.immediate() >> 12 & 0xfffff);
+ if (j.immediate() & kImm12Mask) {
+ ori(rd, rd, j.immediate() & kImm12Mask);
+ }
+ }
+}
+
+int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
+ if (is_int32(value)) {
+ return InstrCountForLiLower32Bit(value);
+ } else if (is_int52(value)) {
+ return InstrCountForLiLower32Bit(value) + 1;
+ } else if ((value & 0xffffffffL) == 0) {
+ // 32 LSBs (Least Significant Bits) all set to zero.
+ uint8_t tzc = base::bits::CountTrailingZeros32(value >> 32);
+ uint8_t lzc = base::bits::CountLeadingZeros32(value >> 32);
+ if (tzc >= 20) {
+ return 1;
+ } else if (tzc + lzc > 12) {
+ return 2;
+ } else {
+ return 3;
+ }
+ } else {
+ int64_t imm21 = (value >> 31) & 0x1fffffL;
+ if (imm21 != 0x1fffffL && imm21 != 0) {
+ return InstrCountForLiLower32Bit(value) + 2;
+ } else {
+ return InstrCountForLiLower32Bit(value) + 1;
+ }
+ }
+ UNREACHABLE();
+ return INT_MAX;
+}
+
+// All changes to if...else conditions here must be added to
+// InstrCountForLi64Bit as well.
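+// Sketch of the decomposition used below (illustrative only): a 64-bit
+// immediate is assembled from bits [11:0] (ori/addi_d), [31:12] (lu12i_w),
+// [51:32] (lu32i_d) and [63:52] (lu52i_d). For example, materializing
+// 0x123456789ABCDEF0 in full would take
+//   lu12i_w rd, 0x9ABCD    // bits 31..12
+//   ori     rd, rd, 0xEF0  // bits 11..0
+//   lu32i_d rd, 0x45678    // bits 51..32
+//   lu52i_d rd, rd, 0x123  // bits 63..52
+// and the cases below simply omit the pieces that are zero or already
+// implied by sign extension.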
+void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
+ DCHECK(!j.is_reg());
+ DCHECK(!MustUseReg(j.rmode()));
+ DCHECK(mode == OPTIMIZE_SIZE);
+ int64_t imm = j.immediate();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Normal load of an immediate value which does not need Relocation Info.
+ if (is_int32(imm)) {
+ LiLower32BitHelper(rd, j);
+ } else if (is_int52(imm)) {
+ LiLower32BitHelper(rd, j);
+ lu32i_d(rd, imm >> 32 & 0xfffff);
+ } else if ((imm & 0xffffffffL) == 0) {
+ // 32 LSBs (Least Significant Bits) all set to zero.
+ uint8_t tzc = base::bits::CountTrailingZeros32(imm >> 32);
+ uint8_t lzc = base::bits::CountLeadingZeros32(imm >> 32);
+ if (tzc >= 20) {
+ lu52i_d(rd, zero_reg, imm >> 52 & kImm12Mask);
+ } else if (tzc + lzc > 12) {
+ int32_t mask = (1 << (32 - tzc)) - 1;
+ lu12i_w(rd, imm >> (tzc + 32) & mask);
+ slli_d(rd, rd, tzc + 20);
+ } else {
+ xor_(rd, rd, rd);
+ lu32i_d(rd, imm >> 32 & 0xfffff);
+ lu52i_d(rd, rd, imm >> 52 & kImm12Mask);
+ }
+ } else {
+ int64_t imm21 = (imm >> 31) & 0x1fffffL;
+ LiLower32BitHelper(rd, j);
+ if (imm21 != 0x1fffffL && imm21 != 0) lu32i_d(rd, imm >> 32 & 0xfffff);
+ lu52i_d(rd, rd, imm >> 52 & kImm12Mask);
+ }
+}
+
+void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
+ DCHECK(!j.is_reg());
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
+ li_optimized(rd, j, mode);
+ } else if (IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(j.rmode())) {
+ BlockGrowBufferScope block_growbuffer(this);
+ int offset = pc_offset();
+ Address address = j.immediate();
+ saved_handles_for_raw_object_ptr_.push_back(
+ std::make_pair(offset, address));
+ Handle<HeapObject> object(reinterpret_cast<Address*>(address));
+ int64_t immediate = object->ptr();
+ RecordRelocInfo(j.rmode(), immediate);
+ lu12i_w(rd, immediate >> 12 & 0xfffff);
+ ori(rd, rd, immediate & kImm12Mask);
+ lu32i_d(rd, immediate >> 32 & 0xfffff);
+ } else if (MustUseReg(j.rmode())) {
+ int64_t immediate;
+ if (j.IsHeapObjectRequest()) {
+ RequestHeapObject(j.heap_object_request());
+ immediate = 0;
+ } else {
+ immediate = j.immediate();
+ }
+
+ RecordRelocInfo(j.rmode(), immediate);
+ lu12i_w(rd, immediate >> 12 & 0xfffff);
+ ori(rd, rd, immediate & kImm12Mask);
+ lu32i_d(rd, immediate >> 32 & 0xfffff);
+ } else if (mode == ADDRESS_LOAD) {
+ // We always need the same number of instructions as we may need to patch
+ // this code to load another value which may need all 3 instructions.
+ lu12i_w(rd, j.immediate() >> 12 & 0xfffff);
+ ori(rd, rd, j.immediate() & kImm12Mask);
+ lu32i_d(rd, j.immediate() >> 32 & 0xfffff);
+ } else { // mode == CONSTANT_SIZE - always emit the same instruction
+ // sequence.
+ lu12i_w(rd, j.immediate() >> 12 & 0xfffff);
+ ori(rd, rd, j.immediate() & kImm12Mask);
+ lu32i_d(rd, j.immediate() >> 32 & 0xfffff);
+ lu52i_d(rd, rd, j.immediate() >> 52 & kImm12Mask);
+ }
+}
+
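+// MultiPush/MultiPop store and reload the GP registers selected by a
+// RegList bit mask, adjusting sp only once. A hypothetical call site
+// (assuming the usual Register::bit() helper) would look like
+//   __ MultiPush(a0.bit() | a1.bit() | ra.bit());
+//   ...
+//   __ MultiPop(a0.bit() | a1.bit() | ra.bit());
+// Registers are pushed in descending and popped in ascending code order,
+// so the same list must be used for both calls.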
+void TurboAssembler::MultiPush(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPush(RegList regs1, RegList regs2) {
+ DCHECK_EQ(regs1 & regs2, 0);
+ int16_t stack_offset = 0;
+
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs1 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs2 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) {
+ DCHECK_EQ(regs1 & regs2, 0);
+ DCHECK_EQ(regs1 & regs3, 0);
+ DCHECK_EQ(regs2 & regs3, 0);
+ int16_t stack_offset = 0;
+
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs1 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs2 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs3 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPop(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPop(RegList regs1, RegList regs2) {
+ DCHECK_EQ(regs1 & regs2, 0);
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs2 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs1 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) {
+ DCHECK_EQ(regs1 & regs2, 0);
+ DCHECK_EQ(regs1 & regs3, 0);
+ DCHECK_EQ(regs2 & regs3, 0);
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs3 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs2 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs1 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPushFPU(RegList regs) {
+ int16_t num_to_push = base::bits::CountPopulation(regs);
+ int16_t stack_offset = num_to_push * kDoubleSize;
+
+ Sub_d(sp, sp, Operand(stack_offset));
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ stack_offset -= kDoubleSize;
+ Fst_d(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ }
+ }
+}
+
+void TurboAssembler::MultiPopFPU(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ Fld_d(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ stack_offset += kDoubleSize;
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw,
+ uint16_t lsbw) {
+ DCHECK_LT(lsbw, msbw);
+ DCHECK_LT(lsbw, 32);
+ DCHECK_LT(msbw, 32);
+ bstrpick_w(rk, rj, msbw, lsbw);
+}
+
+void TurboAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw,
+ uint16_t lsbw) {
+ DCHECK_LT(lsbw, msbw);
+ DCHECK_LT(lsbw, 64);
+ DCHECK_LT(msbw, 64);
+ bstrpick_d(rk, rj, msbw, lsbw);
+}
+
+void TurboAssembler::Neg_s(FPURegister fd, FPURegister fj) { fneg_s(fd, fj); }
+
+void TurboAssembler::Neg_d(FPURegister fd, FPURegister fj) { fneg_d(fd, fj); }
+
+void TurboAssembler::Ffint_d_uw(FPURegister fd, FPURegister fj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ movfr2gr_s(t8, fj);
+ Ffint_d_uw(fd, t8);
+}
+
+void TurboAssembler::Ffint_d_uw(FPURegister fd, Register rj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != t7);
+
+ Bstrpick_d(t7, rj, 31, 0);
+ movgr2fr_d(fd, t7);
+ ffint_d_l(fd, fd);
+}
+
+void TurboAssembler::Ffint_d_ul(FPURegister fd, FPURegister fj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ movfr2gr_d(t8, fj);
+ Ffint_d_ul(fd, t8);
+}
+
+void TurboAssembler::Ffint_d_ul(FPURegister fd, Register rj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != t7);
+
+ Label msb_clear, conversion_done;
+
+ Branch(&msb_clear, ge, rj, Operand(zero_reg));
+
+ // Rj >= 2^63
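+  // The value does not fit in a signed 64-bit integer, so halve it first,
+  // keeping the dropped low bit sticky (the or_ below) so the halved value
+  // still rounds correctly; after converting, fadd_d doubles the result
+  // back.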
+ andi(t7, rj, 1);
+ srli_d(rj, rj, 1);
+ or_(t7, t7, rj);
+ movgr2fr_d(fd, t7);
+ ffint_d_l(fd, fd);
+ fadd_d(fd, fd, fd);
+ Branch(&conversion_done);
+
+ bind(&msb_clear);
+  // rj < 2^63, so we can do a simple conversion.
+ movgr2fr_d(fd, rj);
+ ffint_d_l(fd, fd);
+
+ bind(&conversion_done);
+}
+
+void TurboAssembler::Ffint_s_uw(FPURegister fd, FPURegister fj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ movfr2gr_d(t8, fj);
+ Ffint_s_uw(fd, t8);
+}
+
+void TurboAssembler::Ffint_s_uw(FPURegister fd, Register rj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != t7);
+
+ bstrpick_d(t7, rj, 31, 0);
+ movgr2fr_d(fd, t7);
+ ffint_s_l(fd, fd);
+}
+
+void TurboAssembler::Ffint_s_ul(FPURegister fd, FPURegister fj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ movfr2gr_d(t8, fj);
+ Ffint_s_ul(fd, t8);
+}
+
+void TurboAssembler::Ffint_s_ul(FPURegister fd, Register rj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != t7);
+
+ Label positive, conversion_done;
+
+ Branch(&positive, ge, rj, Operand(zero_reg));
+
+  // rj >= 2^63.
+ andi(t7, rj, 1);
+ srli_d(rj, rj, 1);
+ or_(t7, t7, rj);
+ movgr2fr_d(fd, t7);
+ ffint_s_l(fd, fd);
+ fadd_s(fd, fd, fd);
+ Branch(&conversion_done);
+
+ bind(&positive);
+  // rj < 2^63, so we can do a simple conversion.
+ movgr2fr_d(fd, rj);
+ ffint_s_l(fd, fd);
+
+ bind(&conversion_done);
+}
+
+void MacroAssembler::Ftintrne_l_d(FPURegister fd, FPURegister fj) {
+ ftintrne_l_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrm_l_d(FPURegister fd, FPURegister fj) {
+ ftintrm_l_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrp_l_d(FPURegister fd, FPURegister fj) {
+ ftintrp_l_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrz_l_d(FPURegister fd, FPURegister fj) {
+ ftintrz_l_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrz_l_ud(FPURegister fd, FPURegister fj,
+ FPURegister scratch) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Load to GPR.
+ movfr2gr_d(t8, fj);
+ // Reset sign bit.
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ li(scratch1, 0x7FFFFFFFFFFFFFFFl);
+ and_(t8, t8, scratch1);
+ }
+ movgr2fr_d(scratch, t8);
+ Ftintrz_l_d(fd, scratch);
+}
+
+void TurboAssembler::Ftintrz_uw_d(FPURegister fd, FPURegister fj,
+ FPURegister scratch) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ftintrz_uw_d(t8, fj, scratch);
+ movgr2fr_w(fd, t8);
+}
+
+void TurboAssembler::Ftintrz_uw_s(FPURegister fd, FPURegister fj,
+ FPURegister scratch) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ftintrz_uw_s(t8, fj, scratch);
+ movgr2fr_w(fd, t8);
+}
+
+void TurboAssembler::Ftintrz_ul_d(FPURegister fd, FPURegister fj,
+ FPURegister scratch, Register result) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ftintrz_ul_d(t8, fj, scratch, result);
+ movgr2fr_d(fd, t8);
+}
+
+void TurboAssembler::Ftintrz_ul_s(FPURegister fd, FPURegister fj,
+ FPURegister scratch, Register result) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ftintrz_ul_s(t8, fj, scratch, result);
+ movgr2fr_d(fd, t8);
+}
+
+void MacroAssembler::Ftintrz_w_d(FPURegister fd, FPURegister fj) {
+ ftintrz_w_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrne_w_d(FPURegister fd, FPURegister fj) {
+ ftintrne_w_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrm_w_d(FPURegister fd, FPURegister fj) {
+ ftintrm_w_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrp_w_d(FPURegister fd, FPURegister fj) {
+ ftintrp_w_d(fd, fj);
+}
+
+void TurboAssembler::Ftintrz_uw_d(Register rd, FPURegister fj,
+ FPURegister scratch) {
+ DCHECK(fj != scratch);
+ DCHECK(rd != t7);
+
+ {
+    // Load 2^31 into scratch as a double (0x41E00000 is the upper word of
+    // the IEEE-754 encoding of 2^31).
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ li(scratch1, 0x41E00000);
+ movgr2fr_w(scratch, zero_reg);
+ movgr2frh_w(scratch, scratch1);
+ }
+  // Test if scratch > fj.
+  // If fj < 2^31 we can convert it normally.
+ Label simple_convert;
+ CompareF64(fj, scratch, CLT);
+ BranchTrueShortF(&simple_convert);
+
+  // First we subtract 2^31 from fj, truncate it to rd,
+  // and then add 2^31 back to rd.
+ fsub_d(scratch, fj, scratch);
+ ftintrz_w_d(scratch, scratch);
+ movfr2gr_s(rd, scratch);
+ Or(rd, rd, 1 << 31);
+
+ Label done;
+ Branch(&done);
+ // Simple conversion.
+ bind(&simple_convert);
+ ftintrz_w_d(scratch, fj);
+ movfr2gr_s(rd, scratch);
+
+ bind(&done);
+}
+
+void TurboAssembler::Ftintrz_uw_s(Register rd, FPURegister fj,
+ FPURegister scratch) {
+ DCHECK(fj != scratch);
+ DCHECK(rd != t7);
+ {
+ // Load 2^31 into scratch as its float representation.
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ li(scratch1, 0x4F000000);
+ movgr2fr_w(scratch, scratch1);
+ }
+  // Test if scratch > fj.
+  // If fj < 2^31 we can convert it normally.
+ Label simple_convert;
+ CompareF32(fj, scratch, CLT);
+ BranchTrueShortF(&simple_convert);
+
+  // First we subtract 2^31 from fj, truncate it to rd,
+  // and then add 2^31 back to rd.
+ fsub_s(scratch, fj, scratch);
+ ftintrz_w_s(scratch, scratch);
+ movfr2gr_s(rd, scratch);
+ Or(rd, rd, 1 << 31);
+
+ Label done;
+ Branch(&done);
+ // Simple conversion.
+ bind(&simple_convert);
+ ftintrz_w_s(scratch, fj);
+ movfr2gr_s(rd, scratch);
+
+ bind(&done);
+}
+
+void TurboAssembler::Ftintrz_ul_d(Register rd, FPURegister fj,
+ FPURegister scratch, Register result) {
+ DCHECK(fj != scratch);
+ DCHECK(result.is_valid() ? !AreAliased(rd, result, t7) : !AreAliased(rd, t7));
+
+ Label simple_convert, done, fail;
+ if (result.is_valid()) {
+ mov(result, zero_reg);
+ Move(scratch, -1.0);
+    // If fj <= -1 or unordered, the conversion fails.
+ CompareF64(fj, scratch, CLE);
+ BranchTrueShortF(&fail);
+ CompareIsNanF64(fj, scratch);
+ BranchTrueShortF(&fail);
+ }
+
+ // Load 2^63 into scratch as its double representation.
+ li(t7, 0x43E0000000000000);
+ movgr2fr_d(scratch, t7);
+
+  // Test if scratch > fj.
+  // If fj < 2^63 we can convert it normally.
+ CompareF64(fj, scratch, CLT);
+ BranchTrueShortF(&simple_convert);
+
+  // First we subtract 2^63 from fj, truncate it to rd,
+  // and then add 2^63 back to rd.
+ fsub_d(scratch, fj, scratch);
+ ftintrz_l_d(scratch, scratch);
+ movfr2gr_d(rd, scratch);
+ Or(rd, rd, Operand(1UL << 63));
+ Branch(&done);
+
+ // Simple conversion.
+ bind(&simple_convert);
+ ftintrz_l_d(scratch, fj);
+ movfr2gr_d(rd, scratch);
+
+ bind(&done);
+ if (result.is_valid()) {
+    // The conversion failed if the result is negative.
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ addi_d(scratch1, zero_reg, -1);
+      srli_d(scratch1, scratch1, 1);  // Load 2^63 - 1.
+ movfr2gr_d(result, scratch);
+ xor_(result, result, scratch1);
+ }
+ Slt(result, zero_reg, result);
+ }
+
+ bind(&fail);
+}
+
+void TurboAssembler::Ftintrz_ul_s(Register rd, FPURegister fj,
+ FPURegister scratch, Register result) {
+ DCHECK(fj != scratch);
+ DCHECK(result.is_valid() ? !AreAliased(rd, result, t7) : !AreAliased(rd, t7));
+
+ Label simple_convert, done, fail;
+ if (result.is_valid()) {
+ mov(result, zero_reg);
+ Move(scratch, -1.0f);
+    // If fj <= -1 or unordered, the conversion fails.
+ CompareF32(fj, scratch, CLE);
+ BranchTrueShortF(&fail);
+ CompareIsNanF32(fj, scratch);
+ BranchTrueShortF(&fail);
+ }
+
+ {
+ // Load 2^63 into scratch as its float representation.
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ li(scratch1, 0x5F000000);
+ movgr2fr_w(scratch, scratch1);
+ }
+
+  // Test if scratch > fj.
+  // If fj < 2^63 we can convert it normally.
+ CompareF32(fj, scratch, CLT);
+ BranchTrueShortF(&simple_convert);
+
+  // First we subtract 2^63 from fj, truncate it to rd,
+  // and then add 2^63 back to rd.
+ fsub_s(scratch, fj, scratch);
+ ftintrz_l_s(scratch, scratch);
+ movfr2gr_d(rd, scratch);
+ Or(rd, rd, Operand(1UL << 63));
+ Branch(&done);
+
+ // Simple conversion.
+ bind(&simple_convert);
+ ftintrz_l_s(scratch, fj);
+ movfr2gr_d(rd, scratch);
+
+ bind(&done);
+ if (result.is_valid()) {
+    // The conversion failed if the result is negative or unordered.
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ addi_d(scratch1, zero_reg, -1);
+      srli_d(scratch1, scratch1, 1);  // Load 2^63 - 1.
+ movfr2gr_d(result, scratch);
+ xor_(result, result, scratch1);
+ }
+ Slt(result, zero_reg, result);
+ }
+
+ bind(&fail);
+}
+
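+// RoundDouble and RoundFloat below temporarily switch the FCSR rounding
+// mode: the current FCSR is saved in t8, the requested mode is installed
+// via t7, frint_d/frint_s rounds with that mode in effect, and the
+// original FCSR is restored afterwards.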
+void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
+ FPURoundingMode mode) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = t8;
+ movfcsr2gr(scratch);
+ li(t7, Operand(mode));
+ movgr2fcsr(t7);
+ frint_d(dst, src);
+ movgr2fcsr(scratch);
+}
+
+void TurboAssembler::Floor_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_floor);
+}
+
+void TurboAssembler::Ceil_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_ceil);
+}
+
+void TurboAssembler::Trunc_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_trunc);
+}
+
+void TurboAssembler::Round_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_round);
+}
+
+void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
+ FPURoundingMode mode) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = t8;
+ movfcsr2gr(scratch);
+ li(t7, Operand(mode));
+ movgr2fcsr(t7);
+ frint_s(dst, src);
+ movgr2fcsr(scratch);
+}
+
+void TurboAssembler::Floor_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_floor);
+}
+
+void TurboAssembler::Ceil_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_ceil);
+}
+
+void TurboAssembler::Trunc_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_trunc);
+}
+
+void TurboAssembler::Round_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_round);
+}
+
+void TurboAssembler::CompareF(FPURegister cmp1, FPURegister cmp2,
+ FPUCondition cc, CFRegister cd, bool f32) {
+ if (f32) {
+ fcmp_cond_s(cc, cmp1, cmp2, cd);
+ } else {
+ fcmp_cond_d(cc, cmp1, cmp2, cd);
+ }
+}
+
+void TurboAssembler::CompareIsNanF(FPURegister cmp1, FPURegister cmp2,
+ CFRegister cd, bool f32) {
+ CompareF(cmp1, cmp2, CUN, cd, f32);
+}
+
+void TurboAssembler::BranchTrueShortF(Label* target, CFRegister cj) {
+ bcnez(cj, target);
+}
+
+void TurboAssembler::BranchFalseShortF(Label* target, CFRegister cj) {
+ bceqz(cj, target);
+}
+
+void TurboAssembler::BranchTrueF(Label* target, CFRegister cj) {
+  // TODO(yuyin): can be optimized
+ bool long_branch = target->is_bound()
+ ? !is_near(target, OffsetSize::kOffset21)
+ : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ BranchFalseShortF(&skip, cj);
+ Branch(target);
+ bind(&skip);
+ } else {
+ BranchTrueShortF(target, cj);
+ }
+}
+
+void TurboAssembler::BranchFalseF(Label* target, CFRegister cj) {
+ bool long_branch = target->is_bound()
+ ? !is_near(target, OffsetSize::kOffset21)
+ : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ BranchTrueShortF(&skip, cj);
+ Branch(target);
+ bind(&skip);
+ } else {
+ BranchFalseShortF(target, cj);
+ }
+}
+
+void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(src_low != scratch);
+ movfrh2gr_s(scratch, dst);
+ movgr2fr_w(dst, src_low);
+ movgr2frh_w(dst, scratch);
+}
+
+void TurboAssembler::Move(FPURegister dst, uint32_t src) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(static_cast<int32_t>(src)));
+ movgr2fr_w(dst, scratch);
+}
+
+void TurboAssembler::Move(FPURegister dst, uint64_t src) {
+ // Handle special values first.
+ if (src == bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
+ fmov_d(dst, kDoubleRegZero);
+ } else if (src == bit_cast<uint64_t>(-0.0) && has_double_zero_reg_set_) {
+ Neg_d(dst, kDoubleRegZero);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(static_cast<int64_t>(src)));
+ movgr2fr_d(dst, scratch);
+ if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true;
+ }
+}
+
+void TurboAssembler::Movz(Register rd, Register rj, Register rk) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ maskeqz(scratch, rj, rk);
+ masknez(rd, rd, rk);
+ or_(rd, rd, scratch);
+}
+
+void TurboAssembler::Movn(Register rd, Register rj, Register rk) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ masknez(scratch, rj, rk);
+ maskeqz(rd, rd, rk);
+ or_(rd, rd, scratch);
+}
+
+void TurboAssembler::LoadZeroOnCondition(Register rd, Register rj,
+ const Operand& rk, Condition cond) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ mov(rd, zero_reg);
+ break;
+ case eq:
+ if (rj == zero_reg) {
+ if (rk.is_reg()) {
+ LoadZeroIfConditionZero(rd, rk.rm());
+ } else if (rk.immediate() == 0) {
+ mov(rd, zero_reg);
+ }
+ } else if (IsZero(rk)) {
+ LoadZeroIfConditionZero(rd, rj);
+ } else {
+ Sub_d(t7, rj, rk);
+ LoadZeroIfConditionZero(rd, t7);
+ }
+ break;
+ case ne:
+ if (rj == zero_reg) {
+ if (rk.is_reg()) {
+ LoadZeroIfConditionNotZero(rd, rk.rm());
+ } else if (rk.immediate() != 0) {
+ mov(rd, zero_reg);
+ }
+ } else if (IsZero(rk)) {
+ LoadZeroIfConditionNotZero(rd, rj);
+ } else {
+ Sub_d(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ }
+ break;
+
+ // Signed comparison.
+ case greater:
+ Sgt(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ break;
+ case greater_equal:
+ Sge(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj >= rk
+ break;
+ case less:
+ Slt(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj < rk
+ break;
+ case less_equal:
+ Sle(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj <= rk
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ Sgtu(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj > rk
+ break;
+
+ case Ugreater_equal:
+ Sgeu(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj >= rk
+ break;
+ case Uless:
+ Sltu(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj < rk
+ break;
+ case Uless_equal:
+ Sleu(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj <= rk
+ break;
+ default:
+ UNREACHABLE();
+  }
+}
+
+void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
+ Register condition) {
+ maskeqz(dest, dest, condition);
+}
+
+void TurboAssembler::LoadZeroIfConditionZero(Register dest,
+ Register condition) {
+ masknez(dest, dest, condition);
+}
+
+void TurboAssembler::LoadZeroIfFPUCondition(Register dest, CFRegister cc) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ movcf2gr(scratch, cc);
+ LoadZeroIfConditionNotZero(dest, scratch);
+}
+
+void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest, CFRegister cc) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ movcf2gr(scratch, cc);
+ LoadZeroIfConditionZero(dest, scratch);
+}
+
+void TurboAssembler::Clz_w(Register rd, Register rj) { clz_w(rd, rj); }
+
+void TurboAssembler::Clz_d(Register rd, Register rj) { clz_d(rd, rj); }
+
+void TurboAssembler::Ctz_w(Register rd, Register rj) { ctz_w(rd, rj); }
+
+void TurboAssembler::Ctz_d(Register rd, Register rj) { ctz_d(rd, rj); }
+
+// TODO(LOONG_dev): Optimize like arm64, use simd instruction
+void TurboAssembler::Popcnt_w(Register rd, Register rj) {
+ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ //
+ // A generalization of the best bit counting method to integers of
+ // bit-widths up to 128 (parameterized by type T) is this:
+ //
+ // v = v - ((v >> 1) & (T)~(T)0/3); // temp
+ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
+ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
+ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
+ //
+ // There are algorithms which are faster in the cases where very few
+ // bits are set but the algorithm here attempts to minimize the total
+ // number of instructions executed even when a large number of bits
+ // are set.
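+  // As a concrete check (illustrative only), for rj = 0xF0F0F0F0:
+  //   step 1: 0xF0F0F0F0 - ((0xF0F0F0F0 >> 1) & 0x55555555) = 0xA0A0A0A0
+  //   step 2: (0xA0A0A0A0 & 0x33333333) + ((0xA0A0A0A0 >> 2) & 0x33333333)
+  //           = 0x40404040
+  //   step 3: (0x40404040 + (0x40404040 >> 4)) & 0x0F0F0F0F = 0x04040404
+  //   step 4: (0x04040404 * 0x01010101) >> 24 = 0x10 = 16 set bits.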
+ int32_t B0 = 0x55555555; // (T)~(T)0/3
+ int32_t B1 = 0x33333333; // (T)~(T)0/15*3
+ int32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
+ int32_t value = 0x01010101; // (T)~(T)0/255
+ uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = t8;
+ srli_w(scratch, rj, 1);
+ li(scratch2, B0);
+ And(scratch, scratch, scratch2);
+ Sub_w(scratch, rj, scratch);
+ li(scratch2, B1);
+ And(rd, scratch, scratch2);
+ srli_w(scratch, scratch, 2);
+ And(scratch, scratch, scratch2);
+ Add_w(scratch, rd, scratch);
+ srli_w(rd, scratch, 4);
+ Add_w(rd, rd, scratch);
+ li(scratch2, B2);
+ And(rd, rd, scratch2);
+ li(scratch, value);
+ Mul_w(rd, rd, scratch);
+ srli_w(rd, rd, shift);
+}
+
+void TurboAssembler::Popcnt_d(Register rd, Register rj) {
+ int64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
+ int64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
+ int64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
+ int64_t value = 0x0101010101010101l; // (T)~(T)0/255
+ uint32_t shift = 56; // (sizeof(T) - 1) * BITS_PER_BYTE
+
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = t8;
+ srli_d(scratch, rj, 1);
+ li(scratch2, B0);
+ And(scratch, scratch, scratch2);
+ Sub_d(scratch, rj, scratch);
+ li(scratch2, B1);
+ And(rd, scratch, scratch2);
+ srli_d(scratch, scratch, 2);
+ And(scratch, scratch, scratch2);
+ Add_d(scratch, rd, scratch);
+ srli_d(rd, scratch, 4);
+ Add_d(rd, rd, scratch);
+ li(scratch2, B2);
+ And(rd, rd, scratch2);
+ li(scratch, value);
+ Mul_d(rd, rd, scratch);
+ srli_d(rd, rd, shift);
+}
+
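+// ExtractBits shifts the field at bit 'pos' down to bit zero, masks it to
+// 'size' bits with bstrpick_d and optionally sign-extends it. InsertBits
+// below does the inverse: it rotates the field down to bit zero,
+// overwrites it with bstrins_d and rotates back.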
+void TurboAssembler::ExtractBits(Register dest, Register source, Register pos,
+ int size, bool sign_extend) {
+ sra_d(dest, source, pos);
+ bstrpick_d(dest, dest, size - 1, 0);
+ if (sign_extend) {
+ switch (size) {
+ case 8:
+ ext_w_b(dest, dest);
+ break;
+ case 16:
+ ext_w_h(dest, dest);
+ break;
+ case 32:
+ // sign-extend word
+ slli_w(dest, dest, 0);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
+ int size) {
+ Rotr_d(dest, dest, pos);
+ bstrins_d(dest, source, size - 1, 0);
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Sub_d(scratch, zero_reg, pos);
+ Rotr_d(dest, dest, scratch);
+ }
+}
+
+void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ DoubleRegister single_scratch = kScratchDoubleReg.low();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+
+ ftintrz_l_d(single_scratch, double_input);
+ movfr2gr_d(scratch2, single_scratch);
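+  // A value equals itself rotated right by one only when it is 0 or all
+  // ones; after XOR-ing with 1 << 63 that happens exactly when the
+  // truncation produced INT64_MIN or INT64_MAX, which are assumed to be
+  // the saturation results for NaN and out-of-range inputs. In every other
+  // case the truncation is exact and its low 32 bits, moved to 'result'
+  // below, are accepted.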
+ li(scratch, 1L << 63);
+ Xor(scratch, scratch, scratch2);
+ rotri_d(scratch2, scratch, 1);
+ movfr2gr_s(result, single_scratch);
+ Branch(done, ne, scratch, Operand(scratch2));
+
+ // Truncate NaN to zero.
+ CompareIsNanF64(double_input, double_input);
+ Move(result, zero_reg);
+ bcnez(FCC0, done);
+}
+
+void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
+ Register result,
+ DoubleRegister double_input,
+ StubCallMode stub_mode) {
+ Label done;
+
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+  // If we fell through, the inline version didn't succeed; call the stub.
+ Sub_d(sp, sp,
+ Operand(kDoubleSize + kSystemPointerSize)); // Put input on stack.
+ St_d(ra, MemOperand(sp, kSystemPointerSize));
+ Fst_d(double_input, MemOperand(sp, 0));
+
+#if V8_ENABLE_WEBASSEMBLY
+ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
+ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
+ } else {
+ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ }
+
+ Pop(ra, result);
+ bind(&done);
+}
+
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+#define BRANCH_ARGS_CHECK(cond, rj, rk) \
+ DCHECK((cond == cc_always && rj == zero_reg && rk.rm() == zero_reg) || \
+ (cond != cc_always && (rj != zero_reg || rk.rm() != zero_reg)))
+
+void TurboAssembler::Branch(Label* L, bool need_link) {
+ int offset = GetOffset(L, OffsetSize::kOffset26);
+ if (need_link) {
+ bl(offset);
+ } else {
+ b(offset);
+ }
+}
+
+void TurboAssembler::Branch(Label* L, Condition cond, Register rj,
+ const Operand& rk, bool need_link) {
+ if (L->is_bound()) {
+ BRANCH_ARGS_CHECK(cond, rj, rk);
+ if (!BranchShortOrFallback(L, cond, rj, rk, need_link)) {
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rj, rk, need_link);
+ Branch(L, need_link);
+ bind(&skip);
+ } else {
+ Branch(L);
+ }
+ }
+ } else {
+ if (is_trampoline_emitted()) {
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rj, rk, need_link);
+ Branch(L, need_link);
+ bind(&skip);
+ } else {
+ Branch(L);
+ }
+ } else {
+ BranchShort(L, cond, rj, rk, need_link);
+ }
+ }
+}
+
+void TurboAssembler::Branch(Label* L, Condition cond, Register rj,
+ RootIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Branch(L, cond, rj, Operand(scratch));
+}
+
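+// Branch offsets are measured in 4-byte instructions, hence the shift by
+// two below; OffsetSize::kOffset16/21/26 give the width of the offset
+// field in the corresponding branch encodings.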
+int32_t TurboAssembler::GetOffset(Label* L, OffsetSize bits) {
+ return branch_offset_helper(L, bits) >> 2;
+}
+
+Register TurboAssembler::GetRkAsRegisterHelper(const Operand& rk,
+ Register scratch) {
+ Register r2 = no_reg;
+ if (rk.is_reg()) {
+ r2 = rk.rm();
+ } else {
+ r2 = scratch;
+ li(r2, rk);
+ }
+
+ return r2;
+}
+
+bool TurboAssembler::BranchShortOrFallback(Label* L, Condition cond,
+ Register rj, const Operand& rk,
+ bool need_link) {
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK_NE(rj, zero_reg);
+
+  // Be careful to always use shifted_branch_offset only just before the
+  // branch instruction, as the location will be remembered for patching the
+  // target.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ int offset = 0;
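+    // The conditional branch instructions used below do not link, so when
+    // need_link is set the return address is built by hand: pcaddi(ra, 2)
+    // is taken to yield pc plus two instructions, i.e. the address right
+    // after the branch that follows (an assumption about pcaddi semantics).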
+ switch (cond) {
+ case cc_always:
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ if (need_link) {
+ bl(offset);
+ } else {
+ b(offset);
+ }
+ break;
+ case eq:
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // beq is used here to make the code patchable. Otherwise b should
+ // be used which has no condition field so is not patchable.
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ beq(rj, rj, offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset21);
+ beqz(rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ // We don't want any other register but scratch clobbered.
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ beq(rj, sc, offset);
+ }
+ break;
+ case ne:
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ // bne is used here to make the code patchable. Otherwise we
+ // should not generate any instruction.
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bne(rj, rj, offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset21);
+ bnez(rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ // We don't want any other register but scratch clobbered.
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bne(rj, sc, offset);
+ }
+ break;
+
+ // Signed comparison.
+ case greater:
+ // rj > rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // No code needs to be emitted.
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ blt(zero_reg, rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ blt(sc, rj, offset);
+ }
+ break;
+ case greater_equal:
+ // rj >= rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bge(rj, zero_reg, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bge(rj, sc, offset);
+ }
+ break;
+ case less:
+ // rj < rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // No code needs to be emitted.
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ blt(rj, zero_reg, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ blt(rj, sc, offset);
+ }
+ break;
+ case less_equal:
+ // rj <= rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bge(zero_reg, rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bge(sc, rj, offset);
+ }
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ // rj > rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // No code needs to be emitted.
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ bnez(rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bltu(sc, rj, offset);
+ }
+ break;
+ case Ugreater_equal:
+ // rj >= rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bgeu(rj, sc, offset);
+ }
+ break;
+ case Uless:
+ // rj < rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // No code needs to be emitted.
+ } else if (IsZero(rk)) {
+ // No code needs to be emitted.
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bltu(rj, sc, offset);
+ }
+ break;
+ case Uless_equal:
+ // rj <= rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
+ if (need_link) pcaddi(ra, 2);
+          offset = GetOffset(L, OffsetSize::kOffset21);
+          beqz(rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bgeu(sc, rj, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return true;
+}
+
+void TurboAssembler::BranchShort(Label* L, Condition cond, Register rj,
+ const Operand& rk, bool need_link) {
+ BRANCH_ARGS_CHECK(cond, rj, rk);
+ bool result = BranchShortOrFallback(L, cond, rj, rk, need_link);
+ DCHECK(result);
+ USE(result);
+}
+
+void TurboAssembler::LoadFromConstantsTable(Register destination,
+ int constant_index) {
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
+ LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
+ Ld_d(destination,
+ FieldMemOperand(destination, FixedArray::kHeaderSize +
+ constant_index * kPointerSize));
+}
+
+void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ Ld_d(destination, MemOperand(kRootRegister, offset));
+}
+
+void TurboAssembler::LoadRootRegisterOffset(Register destination,
+ intptr_t offset) {
+ if (offset == 0) {
+ Move(destination, kRootRegister);
+ } else {
+ Add_d(destination, kRootRegister, Operand(offset));
+ }
+}
+
+void TurboAssembler::Jump(Register target, Condition cond, Register rj,
+ const Operand& rk) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond == cc_always) {
+ jirl(zero_reg, target, 0);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rj, rk);
+ Label skip;
+ Branch(&skip, NegateCondition(cond), rj, rk);
+ jirl(zero_reg, target, 0);
+ bind(&skip);
+ }
+}
+
+void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+ Condition cond, Register rj, const Operand& rk) {
+ Label skip;
+ if (cond != cc_always) {
+ Branch(&skip, NegateCondition(cond), rj, rk);
+ }
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ li(t7, Operand(target, rmode));
+ jirl(zero_reg, t7, 0);
+ bind(&skip);
+ }
+}
+
+void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+ Register rj, const Operand& rk) {
+ DCHECK(!RelocInfo::IsCodeTarget(rmode));
+ Jump(static_cast<intptr_t>(target), rmode, cond, rj, rk);
+}
+
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register rj, const Operand& rk) {
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label skip;
+ if (cond != cc_always) {
+ BranchShort(&skip, NegateCondition(cond), rj, rk);
+ }
+
+ Builtin builtin = Builtin::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
+ Builtins::IsIsolateIndependent(builtin);
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
+ b(code_target_index);
+ bind(&skip);
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code) {
+ UNREACHABLE();
+ /*int offset = code->builtin_index() * kSystemPointerSize +
+ IsolateData::builtin_entry_table_offset();
+ Ld_d(t7, MemOperand(kRootRegister, offset));
+ Jump(t7, cc_always, rj, rk);
+ bind(&skip);
+ return;*/
+ } else if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin);
+ li(t7, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Jump(t7, cc_always, rj, rk);
+ bind(&skip);
+ return;
+ }
+
+ Jump(static_cast<intptr_t>(code.address()), rmode, cc_always, rj, rk);
+ bind(&skip);
+}
+
+void TurboAssembler::Jump(const ExternalReference& reference) {
+ li(t7, reference);
+ Jump(t7);
+}
+
+// Note: To call gcc-compiled C code on LoongArch, you must call through t[0-8].
+void TurboAssembler::Call(Register target, Condition cond, Register rj,
+ const Operand& rk) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond == cc_always) {
+ jirl(ra, target, 0);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rj, rk);
+ Label skip;
+ Branch(&skip, NegateCondition(cond), rj, rk);
+ jirl(ra, target, 0);
+ bind(&skip);
+ }
+ set_last_call_pc_(pc_);
+}
+
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit,
+ Label* on_in_range) {
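+  // Biasing the value by lower_limit lets a single unsigned 'ls' comparison
+  // against (higher_limit - lower_limit) test both bounds at once.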
+ if (lower_limit != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Sub_d(scratch, value, Operand(lower_limit));
+ Branch(on_in_range, ls, scratch, Operand(higher_limit - lower_limit));
+ } else {
+ Branch(on_in_range, ls, value, Operand(higher_limit - lower_limit));
+ }
+}
+
+void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
+ Register rj, const Operand& rk) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label skip;
+ if (cond != cc_always) {
+ BranchShort(&skip, NegateCondition(cond), rj, rk);
+ }
+ intptr_t offset_diff = target - pc_offset();
+ if (RelocInfo::IsNone(rmode) && is_int28(offset_diff)) {
+ bl(offset_diff >> 2);
+ } else if (RelocInfo::IsNone(rmode) && is_int38(offset_diff)) {
+ pcaddu18i(t7, static_cast<int32_t>(offset_diff) >> 18);
+ jirl(ra, t7, (offset_diff & 0x3ffff) >> 2);
+ } else {
+ li(t7, Operand(static_cast<int64_t>(target), rmode), ADDRESS_LOAD);
+ Call(t7, cc_always, rj, rk);
+ }
+ bind(&skip);
+}
+
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register rj, const Operand& rk) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label skip;
+ if (cond != cc_always) {
+ BranchShort(&skip, NegateCondition(cond), rj, rk);
+ }
+
+ Builtin builtin = Builtin::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
+ Builtins::IsIsolateIndependent(builtin);
+
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ RecordCommentForOffHeapTrampoline(builtin);
+ RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
+ bl(code_target_index);
+ set_last_call_pc_(pc_);
+ bind(&skip);
+ RecordComment("]");
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code) {
+ UNREACHABLE();
+ /*int offset = code->builtin_index() * kSystemPointerSize +
+ IsolateData::builtin_entry_table_offset();
+ LoadRootRelative(t7, offset);
+ Call(t7, cond, rj, rk);
+ bind(&skip);
+ return;*/
+ } else if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin);
+ li(t7, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Call(t7, cond, rj, rk);
+ bind(&skip);
+ return;
+ }
+
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+ DCHECK(code->IsExecutable());
+ Call(code.address(), rmode, cc_always, rj, rk);
+ bind(&skip);
+}
+
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+ STATIC_ASSERT(kSystemPointerSize == 8);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_index register contains the builtin index as a Smi.
+ SmiUntag(builtin_index, builtin_index);
+ Alsl_d(builtin_index, builtin_index, kRootRegister, kSystemPointerSizeLog2,
+ t7);
+ Ld_d(builtin_index,
+ MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
+}
+
+void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+ Register destination) {
+ Ld_d(destination, EntryFromBuiltinAsOperand(builtin));
+}
+MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+ DCHECK(root_array_available());
+ return MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(builtin));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
+}
+void TurboAssembler::CallBuiltin(Builtin builtin) {
+ RecordCommentForOffHeapTrampoline(builtin);
+ Call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
+ if (FLAG_code_comments) RecordComment("]");
+}
+
+void TurboAssembler::PatchAndJump(Address target) {
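+  // Emit a four-instruction sequence (pcaddi/Ld_d/jirl/nop) that loads a
+  // 64-bit target stored inline right after it and jumps there; the inline
+  // slot can be patched later (e.g. by the wasm jump table).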
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ pcaddi(scratch, 4);
+ Ld_d(t7, MemOperand(scratch, 0));
+ jirl(zero_reg, t7, 0);
+ nop();
+ DCHECK_EQ(reinterpret_cast<uint64_t>(pc_) % 8, 0);
+  *reinterpret_cast<uint64_t*>(pc_) = target;  // pc_ should be aligned.
+ pc_ += sizeof(uint64_t);
+}
+
+void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ // This generates the final instruction sequence for calls to C functions
+ // once an exit frame has been constructed.
+ //
+ // Note that this assumes the caller code (i.e. the Code object currently
+ // being generated) is immovable or that the callee function cannot trigger
+ // GC, since the callee function will return to it.
+
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
+ static constexpr int kNumInstructionsToJump = 2;
+ Label find_ra;
+  // Adjust the value in ra to point to the correct return location, i.e. the
+  // 2nd instruction past the real call into C code (the jirl), and push it.
+ // This is the return address of the exit frame.
+ pcaddi(ra, kNumInstructionsToJump + 1);
+ bind(&find_ra);
+
+ // This spot was reserved in EnterExitFrame.
+ St_d(ra, MemOperand(sp, 0));
+ // Stack is still aligned.
+
+ // TODO(LOONG_dev): can be jirl target? a0 -- a7?
+ jirl(zero_reg, target, 0);
+ // Make sure the stored 'ra' points to this position.
+ DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
+}
+
+void TurboAssembler::Ret(Condition cond, Register rj, const Operand& rk) {
+ Jump(ra, cond, rj, rk);
+}
+
+void TurboAssembler::Drop(int count, Condition cond, Register reg,
+ const Operand& op) {
+ if (count <= 0) {
+ return;
+ }
+
+ Label skip;
+
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), reg, op);
+ }
+
+ Add_d(sp, sp, Operand(count * kPointerSize));
+
+ if (cond != al) {
+ bind(&skip);
+ }
+}
+
+void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
+ if (scratch == no_reg) {
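+    // No scratch register is available, so fall back to the three-XOR swap.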
+ Xor(reg1, reg1, Operand(reg2));
+ Xor(reg2, reg2, Operand(reg1));
+ Xor(reg1, reg1, Operand(reg2));
+ } else {
+ mov(scratch, reg1);
+ mov(reg1, reg2);
+ mov(reg2, scratch);
+ }
+}
+
+void TurboAssembler::Call(Label* target) { Branch(target, true); }
+
+void TurboAssembler::Push(Smi smi) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(smi));
+ Push(scratch);
+}
+
+void TurboAssembler::Push(Handle<HeapObject> handle) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(handle));
+ Push(scratch);
+}
+
+void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+ Register scratch2, PushArrayOrder order) {
+ DCHECK(!AreAliased(array, size, scratch, scratch2));
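+  // kReverse pushes the elements in ascending index order, leaving the last
+  // element on top of the stack; kNormal pushes in descending order, leaving
+  // element 0 on top.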
+ Label loop, entry;
+ if (order == PushArrayOrder::kReverse) {
+ mov(scratch, zero_reg);
+ jmp(&entry);
+ bind(&loop);
+ Alsl_d(scratch2, scratch, array, kPointerSizeLog2, t7);
+ Ld_d(scratch2, MemOperand(scratch2, 0));
+ Push(scratch2);
+ Add_d(scratch, scratch, Operand(1));
+ bind(&entry);
+ Branch(&loop, less, scratch, Operand(size));
+ } else {
+ mov(scratch, size);
+ jmp(&entry);
+ bind(&loop);
+ Alsl_d(scratch2, scratch, array, kPointerSizeLog2, t7);
+ Ld_d(scratch2, MemOperand(scratch2, 0));
+ Push(scratch2);
+ bind(&entry);
+ Add_d(scratch, scratch, Operand(-1));
+ Branch(&loop, greater_equal, scratch, Operand(zero_reg));
+ }
+}
+
+// ---------------------------------------------------------------------------
+// Exception handling.
+
+void MacroAssembler::PushStackHandler() {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+
+ Push(Smi::zero()); // Padding.
+
+ // Link the current handler as the next handler.
+ li(t2,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
+ Ld_d(t1, MemOperand(t2, 0));
+ Push(t1);
+
+ // Set this new handler as the current one.
+ St_d(sp, MemOperand(t2, 0));
+}
+
+void MacroAssembler::PopStackHandler() {
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ Pop(a1);
+ Add_d(sp, sp,
+ Operand(
+ static_cast<int64_t>(StackHandlerConstants::kSize - kPointerSize)));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
+ St_d(a1, MemOperand(scratch, 0));
+}
+
+void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
+ const DoubleRegister src) {
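+  // Subtracting zero leaves ordinary values unchanged but turns any NaN input
+  // into a quiet (canonical) NaN.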
+ fsub_d(dst, src, kDoubleRegZero);
+}
+
+// -----------------------------------------------------------------------------
+// JavaScript invokes.
+
+void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ CHECK(is_int32(offset));
+ Ld_d(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
+}
+
+void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+
+ LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
+  // Make scratch1 the space we have left. The stack might already have
+  // overflowed here, which will cause scratch1 to become negative.
+ sub_d(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ slli_d(scratch2, num_args, kPointerSizeLog2);
+ // Signed comparison.
+ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+}
+
+void MacroAssembler::InvokePrologue(Register expected_parameter_count,
+ Register actual_parameter_count,
+ Label* done, InvokeType type) {
+ Label regular_invoke;
+
+ // a0: actual arguments count
+ // a1: function (passed through to callee)
+ // a2: expected arguments count
+
+ DCHECK_EQ(actual_parameter_count, a0);
+ DCHECK_EQ(expected_parameter_count, a2);
+
+ // If the expected parameter count is equal to the adaptor sentinel, no need
+ // to push undefined value as arguments.
+ Branch(&regular_invoke, eq, expected_parameter_count,
+ Operand(kDontAdaptArgumentsSentinel));
+
+ // If overapplication or if the actual argument count is equal to the
+ // formal parameter count, no need to push extra undefined values.
+ sub_d(expected_parameter_count, expected_parameter_count,
+ actual_parameter_count);
+ Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
+
+ Label stack_overflow;
+ StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow);
+  // Underapplication. Move the arguments already on the stack, including the
+  // receiver and the return address.
+ {
+ Label copy;
+ Register src = a6, dest = a7;
+ mov(src, sp);
+ slli_d(t0, expected_parameter_count, kSystemPointerSizeLog2);
+ Sub_d(sp, sp, Operand(t0));
+ // Update stack pointer.
+ mov(dest, sp);
+ mov(t0, actual_parameter_count);
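+    // The loop below copies actual_parameter_count + 1 stack slots from the
+    // old stack top down to the new one.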
+ bind(&copy);
+ Ld_d(t1, MemOperand(src, 0));
+ St_d(t1, MemOperand(dest, 0));
+ Sub_d(t0, t0, Operand(1));
+ Add_d(src, src, Operand(kSystemPointerSize));
+ Add_d(dest, dest, Operand(kSystemPointerSize));
+ Branch(&copy, ge, t0, Operand(zero_reg));
+ }
+
+ // Fill remaining expected arguments with undefined values.
+ LoadRoot(t0, RootIndex::kUndefinedValue);
+ {
+ Label loop;
+ bind(&loop);
+ St_d(t0, MemOperand(a7, 0));
+ Sub_d(expected_parameter_count, expected_parameter_count, Operand(1));
+ Add_d(a7, a7, Operand(kSystemPointerSize));
+ Branch(&loop, gt, expected_parameter_count, Operand(zero_reg));
+ }
+ b(&regular_invoke);
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ break_(0xCC);
+ }
+
+ bind(&regular_invoke);
+}
+
+void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count) {
+ // Load receiver to pass it later to DebugOnFunctionCall hook.
+ LoadReceiver(t0, actual_parameter_count);
+ FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+
+ SmiTag(expected_parameter_count);
+ Push(expected_parameter_count);
+
+ SmiTag(actual_parameter_count);
+ Push(actual_parameter_count);
+
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ // TODO(LOONG_dev): MultiPush/Pop
+ Push(fun);
+ Push(fun);
+ Push(t0);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+
+ Pop(actual_parameter_count);
+ SmiUntag(actual_parameter_count);
+
+ Pop(expected_parameter_count);
+ SmiUntag(expected_parameter_count);
+}
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count,
+ InvokeType type) {
+ // You can't call a function without a valid frame.
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
+ DCHECK_EQ(function, a1);
+ DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
+
+ // On function call, call into the debugger if necessary.
+ Label debug_hook, continue_after_hook;
+ {
+ li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
+ Ld_b(t0, MemOperand(t0, 0));
+ BranchShort(&debug_hook, ne, t0, Operand(zero_reg));
+ }
+ bind(&continue_after_hook);
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(a3, RootIndex::kUndefinedValue);
+ }
+
+ Label done;
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = kJavaScriptCallCodeStartRegister;
+ Ld_d(code, FieldMemOperand(function, JSFunction::kCodeOffset));
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
+ }
+
+ Branch(&done);
+
+ // Deferred debug hook.
+ bind(&debug_hook);
+ CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
+ actual_parameter_count);
+ Branch(&continue_after_hook);
+
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+}
+
+void MacroAssembler::InvokeFunctionWithNewTarget(
+ Register function, Register new_target, Register actual_parameter_count,
+ InvokeType type) {
+ // You can't call a function without a valid frame.
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
+
+ // Contract with called JS functions requires that function is passed in a1.
+ DCHECK_EQ(function, a1);
+ Register expected_parameter_count = a2;
+ Register temp_reg = t0;
+ Ld_d(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+  // The argument count is stored as a uint16_t.
+ Ld_hu(expected_parameter_count,
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+
+ InvokeFunctionCode(a1, new_target, expected_parameter_count,
+ actual_parameter_count, type);
+}
+
+void MacroAssembler::InvokeFunction(Register function,
+ Register expected_parameter_count,
+ Register actual_parameter_count,
+ InvokeType type) {
+ // You can't call a function without a valid frame.
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
+
+ // Contract with called JS functions requires that function is passed in a1.
+ DCHECK_EQ(function, a1);
+
+ // Get the function and setup the context.
+ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ InvokeFunctionCode(a1, no_reg, expected_parameter_count,
+ actual_parameter_count, type);
+}
+
+// ---------------------------------------------------------------------------
+// Support functions.
+
+void MacroAssembler::GetObjectType(Register object, Register map,
+ Register type_reg) {
+ LoadMap(map, object);
+ Ld_hu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+}
+
+void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit,
+ Register range) {
+ Ld_hu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Sub_d(range, type_reg, Operand(lower_limit));
+}
+
+// -----------------------------------------------------------------------------
+// Runtime calls.
+
+void TurboAssembler::AddOverflow_d(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ Register right_reg = no_reg;
+ if (!right.is_reg()) {
+ li(scratch, Operand(right));
+ right_reg = scratch;
+ } else {
+ right_reg = right.rm();
+ }
+
+ DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
+ overflow != scratch2);
+ DCHECK(overflow != left && overflow != right_reg);
+
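+  // Signed addition overflows iff both operands have the same sign and the
+  // result's sign differs; (result ^ left) & (result ^ right) is negative
+  // exactly in that case, and that value is left in 'overflow'.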
+ if (dst == left || dst == right_reg) {
+ add_d(scratch2, left, right_reg);
+ xor_(overflow, scratch2, left);
+ xor_(scratch, scratch2, right_reg);
+ and_(overflow, overflow, scratch);
+ mov(dst, scratch2);
+ } else {
+ add_d(dst, left, right_reg);
+ xor_(overflow, dst, left);
+ xor_(scratch, dst, right_reg);
+ and_(overflow, overflow, scratch);
+ }
+}
+
+void TurboAssembler::SubOverflow_d(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ Register right_reg = no_reg;
+ if (!right.is_reg()) {
+ li(scratch, Operand(right));
+ right_reg = scratch;
+ } else {
+ right_reg = right.rm();
+ }
+
+ DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
+ overflow != scratch2);
+ DCHECK(overflow != left && overflow != right_reg);
+
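+  // Signed subtraction overflows iff the operands have different signs and
+  // the result's sign differs from 'left'; (left ^ result) & (left ^ right)
+  // is negative exactly in that case, and that value is left in 'overflow'.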
+ if (dst == left || dst == right_reg) {
+ Sub_d(scratch2, left, right_reg);
+ xor_(overflow, left, scratch2);
+ xor_(scratch, left, right_reg);
+ and_(overflow, overflow, scratch);
+ mov(dst, scratch2);
+ } else {
+ sub_d(dst, left, right_reg);
+ xor_(overflow, left, dst);
+ xor_(scratch, left, right_reg);
+ and_(overflow, overflow, scratch);
+ }
+}
+
+void TurboAssembler::MulOverflow_w(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ Register right_reg = no_reg;
+ if (!right.is_reg()) {
+ li(scratch, Operand(right));
+ right_reg = scratch;
+ } else {
+ right_reg = right.rm();
+ }
+
+ DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
+ overflow != scratch2);
+ DCHECK(overflow != left && overflow != right_reg);
+
+ if (dst == left || dst == right_reg) {
+ Mul_w(scratch2, left, right_reg);
+ Mulh_w(overflow, left, right_reg);
+ mov(dst, scratch2);
+ } else {
+ Mul_w(dst, left, right_reg);
+ Mulh_w(overflow, left, right_reg);
+ }
+
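+  // The 32-bit product overflowed iff the high word from Mulh_w differs from
+  // the sign extension of the low 32 bits, i.e. iff 'overflow' ends up
+  // non-zero.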
+ srai_d(scratch2, dst, 32);
+ xor_(overflow, overflow, scratch2);
+}
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles) {
+ // All parameters are on the stack. v0 has the return value after call.
+
+ // If the expected number of arguments of the runtime function is
+ // constant, we check that the actual number of arguments match the
+ // expectation.
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
+
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ PrepareCEntryArgs(num_arguments);
+ PrepareCEntryFunction(ExternalReference::Create(f));
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Call(code, RelocInfo::CODE_TARGET);
+}
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ PrepareCEntryArgs(function->nargs);
+ }
+ JumpToExternalReference(ExternalReference::Create(fid));
+}
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame) {
+ PrepareCEntryFunction(builtin);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
+ Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
+}
+
+void MacroAssembler::JumpToInstructionStream(Address entry) {
+ li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Jump(kOffHeapTrampolineRegister);
+}
+
+void MacroAssembler::LoadWeakValue(Register out, Register in,
+ Label* target_if_cleared) {
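+  // A cleared weak reference is encoded as kClearedWeakHeapObjectLower32;
+  // otherwise strip the weak tag bit to recover the strong pointer.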
+ Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
+
+ And(out, in, Operand(~kWeakHeapObjectMask));
+}
+
+void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
+ DCHECK_GT(value, 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ // This operation has to be exactly 32-bit wide in case the external
+ // reference table redirects the counter to a uint32_t dummy_stats_counter_
+ // field.
+ li(scratch2, ExternalReference::Create(counter));
+ Ld_w(scratch1, MemOperand(scratch2, 0));
+ Add_w(scratch1, scratch1, Operand(value));
+ St_w(scratch1, MemOperand(scratch2, 0));
+ }
+}
+
+void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
+ DCHECK_GT(value, 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ // This operation has to be exactly 32-bit wide in case the external
+ // reference table redirects the counter to a uint32_t dummy_stats_counter_
+ // field.
+ li(scratch2, ExternalReference::Create(counter));
+ Ld_w(scratch1, MemOperand(scratch2, 0));
+ Sub_w(scratch1, scratch1, Operand(value));
+ St_w(scratch1, MemOperand(scratch2, 0));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Debugging.
+
+void TurboAssembler::Trap() { stop(); }
+void TurboAssembler::DebugBreak() { stop(); }
+
+void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
+ Operand rk) {
+ if (FLAG_debug_code) Check(cc, reason, rs, rk);
+}
+
+void TurboAssembler::Check(Condition cc, AbortReason reason, Register rj,
+ Operand rk) {
+ Label L;
+ Branch(&L, cc, rj, rk);
+ Abort(reason);
+ // Will not return here.
+ bind(&L);
+}
+
+void TurboAssembler::Abort(AbortReason reason) {
+ Label abort_start;
+ bind(&abort_start);
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
+
+ // Avoid emitting call to builtin if requested.
+ if (trap_on_abort()) {
+ stop();
+ return;
+ }
+
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NONE);
+ PrepareCallCFunction(0, a0);
+ li(a0, Operand(static_cast<int>(reason)));
+ CallCFunction(ExternalReference::abort_with_reason(), 1);
+ return;
+ }
+
+ Move(a0, Smi::FromInt(static_cast<int>(reason)));
+
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame()) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
+ } else {
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
+ }
+ // Will not return here.
+ if (is_trampoline_pool_blocked()) {
+ // If the calling code cares about the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the Abort macro constant.
+ // Currently in debug mode with debug_code enabled the number of
+ // generated instructions is 10, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 10;
+ int abort_instructions = InstructionsGeneratedSince(&abort_start);
+ DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
+ while (abort_instructions++ < kExpectedAbortInstructions) {
+ nop();
+ }
+ }
+}
+
+void TurboAssembler::LoadMap(Register destination, Register object) {
+ Ld_d(destination, FieldMemOperand(object, HeapObject::kMapOffset));
+}
+
+void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
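+  // The map of any context stores the native context in its
+  // constructor-or-back-pointer-or-native-context field; load it, then the
+  // requested slot.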
+ LoadMap(dst, cp);
+ Ld_d(dst, FieldMemOperand(
+ dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
+ Ld_d(dst, MemOperand(dst, Context::SlotOffset(index)));
+}
+
+void TurboAssembler::StubPrologue(StackFrame::Type type) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(StackFrame::TypeToMarker(type)));
+ PushCommonFrame(scratch);
+}
+
+void TurboAssembler::Prologue() { PushStandardFrame(a1); }
+
+void TurboAssembler::EnterFrame(StackFrame::Type type) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Push(ra, fp);
+ Move(fp, sp);
+ if (!StackFrame::IsJavaScript(type)) {
+ li(kScratchReg, Operand(StackFrame::TypeToMarker(type)));
+ Push(kScratchReg);
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+#endif // V8_ENABLE_WEBASSEMBLY
+}
+
+void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+ addi_d(sp, fp, 2 * kPointerSize);
+ Ld_d(ra, MemOperand(fp, 1 * kPointerSize));
+ Ld_d(fp, MemOperand(fp, 0 * kPointerSize));
+}
+
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+ StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
+
+ // Set up the frame structure on the stack.
+ STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
+ STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
+ STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
+
+ // This is how the stack will look:
+ // fp + 2 (==kCallerSPDisplacement) - old stack's end
+ // [fp + 1 (==kCallerPCOffset)] - saved old ra
+ // [fp + 0 (==kCallerFPOffset)] - saved old fp
+  //  [fp - 1] - StackFrame::EXIT Smi
+ // [fp - 2 (==kSPOffset)] - sp of the called function
+ // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
+ // new stack (will contain saved ra)
+
+ // Save registers and reserve room for saved entry sp.
+ addi_d(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
+ St_d(ra, MemOperand(sp, 3 * kPointerSize));
+ St_d(fp, MemOperand(sp, 2 * kPointerSize));
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
+ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
+ }
+ // Set up new frame pointer.
+ addi_d(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
+
+ if (FLAG_debug_code) {
+ St_d(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ }
+
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Save the frame pointer and the context in top.
+ li(t8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ isolate()));
+ St_d(fp, MemOperand(t8, 0));
+ li(t8,
+ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
+ St_d(cp, MemOperand(t8, 0));
+ }
+
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ if (save_doubles) {
+    // The stack is already aligned to 0 modulo 8 for double stores (Fst_d).
+ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
+ int space = kNumOfSavedRegisters * kDoubleSize;
+ Sub_d(sp, sp, Operand(space));
+ // Remember: we only need to save every 2nd double FPU value.
+ for (int i = 0; i < kNumOfSavedRegisters; i++) {
+ FPURegister reg = FPURegister::from_code(2 * i);
+ Fst_d(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ }
+
+  // Reserve space for the return address, stack space and an optional slot
+ // (used by DirectCEntry to hold the return value if a struct is
+ // returned) and align the frame preparing for calling the runtime function.
+ DCHECK_GE(stack_space, 0);
+ Sub_d(sp, sp, Operand((stack_space + 2) * kPointerSize));
+ if (frame_alignment > 0) {
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment)); // Align stack.
+ }
+
+ // Set the exit frame sp value to point just before the return address
+ // location.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ addi_d(scratch, sp, kPointerSize);
+ St_d(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+ bool do_return,
+ bool argument_count_is_length) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Optionally restore all double registers.
+ if (save_doubles) {
+ // Remember: we only need to restore every 2nd double FPU value.
+ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
+ Sub_d(t8, fp,
+ Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
+ kNumOfSavedRegisters * kDoubleSize));
+ for (int i = 0; i < kNumOfSavedRegisters; i++) {
+ FPURegister reg = FPURegister::from_code(2 * i);
+ Fld_d(reg, MemOperand(t8, i * kDoubleSize));
+ }
+ }
+
+ // Clear top frame.
+ li(t8,
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
+ St_d(zero_reg, MemOperand(t8, 0));
+
+ // Restore current context from top and clear it in debug mode.
+ li(t8,
+ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
+ Ld_d(cp, MemOperand(t8, 0));
+
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temp(this);
+ Register scratch = temp.Acquire();
+ li(scratch, Operand(Context::kInvalidContext));
+ St_d(scratch, MemOperand(t8, 0));
+ }
+
+ // Pop the arguments, restore registers, and return.
+ mov(sp, fp); // Respect ABI stack constraint.
+ Ld_d(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
+ Ld_d(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
+
+ if (argument_count.is_valid()) {
+ if (argument_count_is_length) {
+ add_d(sp, sp, argument_count);
+ } else {
+ Alsl_d(sp, argument_count, sp, kPointerSizeLog2, t8);
+ }
+ }
+
+ addi_d(sp, sp, 2 * kPointerSize);
+ if (do_return) {
+ Ret();
+ }
+}
+
+int TurboAssembler::ActivationFrameAlignment() {
+#if V8_HOST_ARCH_LOONG64
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one LOONG64
+ // platform for another LOONG64 platform with a different alignment.
+ return base::OS::ActivationFrameAlignment();
+#else // V8_HOST_ARCH_LOONG64
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so this is controlled from a
+ // flag.
+ return FLAG_sim_stack_alignment;
+#endif // V8_HOST_ARCH_LOONG64
+}
+
+void MacroAssembler::AssertStackIsAligned() {
+ if (FLAG_debug_code) {
+ const int frame_alignment = ActivationFrameAlignment();
+ const int frame_alignment_mask = frame_alignment - 1;
+
+ if (frame_alignment > kPointerSize) {
+ Label alignment_as_expected;
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, sp, frame_alignment_mask);
+ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
+ }
+      // Don't use Check here, as it will call Runtime_Abort, re-entering here.
+ stop();
+ bind(&alignment_as_expected);
+ }
+ }
+}
+
+void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+ if (SmiValuesAre32Bits()) {
+ Ld_w(dst, MemOperand(src.base(), SmiWordOffset(src.offset())));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ Ld_w(dst, src);
+ SmiUntag(dst);
+ }
+}
+
+void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
+ DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, value, kSmiTagMask);
+ Branch(smi_label, eq, scratch, Operand(zero_reg));
+}
+
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
+ DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, value, kSmiTagMask);
+ Branch(not_smi_label, ne, scratch, Operand(zero_reg));
+}
+
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (FLAG_debug_code) {
+ STATIC_ASSERT(kSmiTag == 0);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, object, kSmiTagMask);
+ Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
+ }
+}
+
+void MacroAssembler::AssertSmi(Register object) {
+ if (FLAG_debug_code) {
+ STATIC_ASSERT(kSmiTag == 0);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, object, kSmiTagMask);
+ Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
+ }
+}
+
+void MacroAssembler::AssertConstructor(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8,
+ Operand(zero_reg));
+
+ LoadMap(t8, object);
+ Ld_bu(t8, FieldMemOperand(t8, Map::kBitFieldOffset));
+ And(t8, t8, Operand(Map::Bits1::IsConstructorBit::kMask));
+ Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg));
+ }
+}
+
+void MacroAssembler::AssertFunction(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
+ Operand(zero_reg));
+ Push(object);
+ LoadMap(object, object);
+ GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, t8);
+ Check(ls, AbortReason::kOperandIsNotAFunction, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+ Pop(object);
+ }
+}
+
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
+ Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, AbortReason::kOperandIsNotABoundFunction, t8,
+ Operand(JS_BOUND_FUNCTION_TYPE));
+ }
+}
+
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (!FLAG_debug_code) return;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
+ Operand(zero_reg));
+
+ GetObjectType(object, t8, t8);
+
+ Label done;
+
+ // Check if JSGeneratorObject
+ Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE));
+
+ // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
+ Branch(&done, eq, t8, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
+
+ // Check if JSAsyncGeneratorObject
+ Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
+
+ Abort(AbortReason::kOperandIsNotAGeneratorObject);
+
+ bind(&done);
+}
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (FLAG_debug_code) {
+ Label done_checking;
+ AssertNotSmi(object);
+ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ Branch(&done_checking, eq, object, Operand(scratch));
+ GetObjectType(object, scratch, scratch);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
+ Operand(ALLOCATION_SITE_TYPE));
+ bind(&done_checking);
+ }
+}
+
+void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1 == src2) {
+ Move_s(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ CompareIsNanF32(src1, src2);
+ BranchTrueF(out_of_line);
+
+ fmax_s(dst, src1, src2);
+}
+
+void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
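+  // Only reached when at least one input is NaN; the addition propagates a
+  // quiet NaN, which is the required result.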
+ fadd_s(dst, src1, src2);
+}
+
+void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1 == src2) {
+ Move_s(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ CompareIsNanF32(src1, src2);
+ BranchTrueF(out_of_line);
+
+ fmin_s(dst, src1, src2);
+}
+
+void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ fadd_s(dst, src1, src2);
+}
+
+void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1 == src2) {
+ Move_d(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ CompareIsNanF64(src1, src2);
+ BranchTrueF(out_of_line);
+
+ fmax_d(dst, src1, src2);
+}
+
+void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ fadd_d(dst, src1, src2);
+}
+
+void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1 == src2) {
+ Move_d(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ CompareIsNanF64(src1, src2);
+ BranchTrueF(out_of_line);
+
+ fmin_d(dst, src1, src2);
+}
+
+void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ fadd_d(dst, src1, src2);
+}
+
+static const int kRegisterPassedArguments = 8;
+
+int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments) {
+ int stack_passed_words = 0;
+ num_reg_arguments += 2 * num_double_arguments;
+
+ // Up to eight simple arguments are passed in registers a0..a7.
+ if (num_reg_arguments > kRegisterPassedArguments) {
+ stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+ }
+ return stack_passed_words;
+}
+
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+ int num_double_arguments,
+ Register scratch) {
+ int frame_alignment = ActivationFrameAlignment();
+
+  // Up to eight simple arguments are passed in registers a0..a7; there are
+  // no argument slots. Remaining arguments are pushed on the stack.
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
+ if (frame_alignment > kPointerSize) {
+    // Make the stack end at the alignment boundary and make room for the
+    // stack-passed arguments and the original value of sp.
+ mov(scratch, sp);
+ Sub_d(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ bstrins_d(sp, zero_reg, std::log2(frame_alignment) - 1, 0);
+ St_d(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ Sub_d(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
+
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+ Register scratch) {
+ PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+void TurboAssembler::CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ li(t7, function);
+ CallCFunctionHelper(t7, num_reg_arguments, num_double_arguments);
+}
+
+void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+}
+
+void TurboAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
+
+void TurboAssembler::CallCFunction(Register function, int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
+
+void TurboAssembler::CallCFunctionHelper(Register function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
+ DCHECK(has_frame());
+ // Make sure that the stack is aligned before calling a C function unless
+ // running in the simulator. The simulator has its own alignment check which
+ // provides more information.
+
+#if V8_HOST_ARCH_LOONG64
+ if (FLAG_debug_code) {
+ int frame_alignment = base::OS::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+ if (frame_alignment > kPointerSize) {
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ Label alignment_as_expected;
+ {
+ Register scratch = t8;
+ And(scratch, sp, Operand(frame_alignment_mask));
+ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
+ }
+ // Don't use Check here, as it will call Runtime_Abort possibly
+ // re-entering here.
+ stop();
+ bind(&alignment_as_expected);
+ }
+ }
+#endif // V8_HOST_ARCH_LOONG64
+
+ // Just call directly. The function called cannot cause a GC, or
+ // allow preemption, so the return address in the link register
+ // stays correct.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (function != t7) {
+ mov(t7, function);
+ function = t7;
+ }
+
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+    // 't' registers are caller-saved, so they are safe to use as scratch here.
+ Register pc_scratch = t1;
+ Register scratch = t2;
+ DCHECK(!AreAliased(pc_scratch, scratch, function));
+
+ pcaddi(pc_scratch, 1);
+
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ St_d(pc_scratch, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ St_d(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ St_d(pc_scratch, MemOperand(scratch, 0));
+ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ St_d(fp, MemOperand(scratch, 0));
+ }
+
+ Call(function);
+
+ // We don't unset the PC; the FP is the source of truth.
+ if (root_array_available()) {
+ St_d(zero_reg, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ St_d(zero_reg, MemOperand(scratch, 0));
+ }
+ }
+
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
+
+ if (base::OS::ActivationFrameAlignment() > kPointerSize) {
+ Ld_d(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ Add_d(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
+
+#undef BRANCH_ARGS_CHECK
+
+void TurboAssembler::CheckPageFlag(const Register& object, int mask,
+ Condition cc, Label* condition_met) {
+ UseScratchRegisterScope temps(this);
+ temps.Include(t8);
+ Register scratch = temps.Acquire();
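+  // Masking the object address with ~kPageAlignmentMask yields the start of
+  // its BasicMemoryChunk, whose flags word is then tested against |mask|.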
+ And(scratch, object, Operand(~kPageAlignmentMask));
+ Ld_d(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
+ And(scratch, scratch, Operand(mask));
+ Branch(condition_met, cc, scratch, Operand(zero_reg));
+}
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
+ Register reg4, Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ Register candidate = Register::from_code(code);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+}
+
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ // TODO(LOONG_dev): range check, add Pcadd macro function?
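+  // pcaddi computes pc + (si20 << 2), so passing -pc_offset() >> 2 yields the
+  // address of the first instruction emitted for this Code object.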
+ pcaddi(dst, -pc_offset() >> 2);
+}
+
+void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label*) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ld_d(t7, MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(target)));
+ Call(t7);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
+
+ if (kind == DeoptimizeKind::kEagerWithResume) {
+ Branch(ret);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ Deoptimizer::kEagerWithResumeBeforeArgsSize);
+ }
+}
+
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+ Register code_object) {
+ // Code objects are called differently depending on whether we are generating
+ // builtin code (which will later be embedded into the binary) or compiling
+ // user JS code at runtime.
+ // * Builtin code runs in --jitless mode and thus must not call into on-heap
+ // Code targets. Instead, we dispatch through the builtins entry table.
+ // * Codegen at runtime does not have this restriction and we can use the
+ // shorter, branchless instruction sequence. The assumption here is that
+ // targets are usually generated code and not builtin Code objects.
+ if (options().isolate_independent_code) {
+ DCHECK(root_array_available());
+ Label if_code_is_off_heap, out;
+ Register scratch = t8;
+
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is an off-heap trampoline. If so, call its
+ // (off-heap) entry point directly without going through the (on-heap)
+ // trampoline. Otherwise, just call the Code object as always.
+ Ld_w(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+ And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+ BranchShort(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
+ // Not an off-heap trampoline object, the entry point is at
+ // Code::raw_instruction_start().
+ Add_d(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ Branch(&out);
+
+ // An off-heap trampoline, the entry point is loaded from the builtin entry
+ // table.
+ bind(&if_code_is_off_heap);
+ Ld_w(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ // TODO(liuyu): don't use scratch_reg in Alsl_d;
+ Alsl_d(destination, scratch, kRootRegister, kSystemPointerSizeLog2,
+ zero_reg);
+ Ld_d(destination,
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()));
+
+ bind(&out);
+ } else {
+ Add_d(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ DCHECK_EQ(JumpMode::kJump, jump_mode);
+ LoadCodeObjectEntry(code_object, code_object);
+ Jump(code_object);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
new file mode 100644
index 0000000000..ef670fd1cd
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
@@ -0,0 +1,1062 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
+#error This header must be included via macro-assembler.h
+#endif
+
+#ifndef V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
+#define V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/loong64/assembler-loong64.h"
+#include "src/common/globals.h"
+#include "src/objects/tagged-index.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+enum class AbortReason : uint8_t;
+
+// Flags used for LeaveExitFrame function.
+enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false };
+
+// Flags used for the li macro-assembler function.
+enum LiFlags {
+ // If the constant value can be represented in just 12 bits, then
+  // optimize the li to use a single instruction, rather than the lu12i_w/
+  // lu32i_d/lu52i_d/ori sequence. A number of other optimizations that emit
+  // fewer than the maximum number of instructions also exist.
+ OPTIMIZE_SIZE = 0,
+ // Always use 4 instructions (lu12i_w/ori/lu32i_d/lu52i_d sequence),
+ // even if the constant could be loaded with just one, so that this value is
+ // patchable later.
+ CONSTANT_SIZE = 1,
+  // For address loads only 3 instructions are required. Used to mark a
+  // constant load that will be used as an address without relocation
+  // information. It ensures a predictable code size, so specific sites
+  // in the code are patchable.
+ ADDRESS_LOAD = 2
+};
+
+enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+#define SmiWordOffset(offset) (offset + kPointerSize / 2)
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
+class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+ public:
+ using TurboAssemblerBase::TurboAssemblerBase;
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
+ // Out-of-line constant pool not implemented on loong64.
+ UNREACHABLE();
+ }
+ void LeaveFrame(StackFrame::Type type);
+
+ void AllocateStackSpace(Register bytes) { Sub_d(sp, sp, bytes); }
+
+ void AllocateStackSpace(int bytes) {
+ DCHECK_GE(bytes, 0);
+ if (bytes == 0) return;
+ Sub_d(sp, sp, Operand(bytes));
+ }
+
+ // Generates function and stub prologue code.
+ void StubPrologue(StackFrame::Type type);
+ void Prologue();
+
+ void InitializeRootRegister() {
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ li(kRootRegister, Operand(isolate_root));
+ }
+
+  // Jump unconditionally to the given label.
+  // Prefer b(Label) for code generation.
+ void jmp(Label* L) { Branch(L); }
+
+ // -------------------------------------------------------------------------
+ // Debugging.
+
+ void Trap();
+ void DebugBreak();
+
+ // Calls Abort(msg) if the condition cc is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cc, AbortReason reason, Register rj, Operand rk);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cc, AbortReason reason, Register rj, Operand rk);
+
+ // Print a message to stdout and abort execution.
+ void Abort(AbortReason msg);
+
+ void Branch(Label* label, bool need_link = false);
+ void Branch(Label* label, Condition cond, Register r1, const Operand& r2,
+ bool need_link = false);
+ void BranchShort(Label* label, Condition cond, Register r1, const Operand& r2,
+ bool need_link = false);
+ void Branch(Label* L, Condition cond, Register rj, RootIndex index);
+
+ // Floating point branches
+ void CompareF32(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
+ CFRegister cd = FCC0) {
+ CompareF(cmp1, cmp2, cc, cd, true);
+ }
+
+ void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2,
+ CFRegister cd = FCC0) {
+ CompareIsNanF(cmp1, cmp2, cd, true);
+ }
+
+ void CompareF64(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
+ CFRegister cd = FCC0) {
+ CompareF(cmp1, cmp2, cc, cd, false);
+ }
+
+ void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2,
+ CFRegister cd = FCC0) {
+ CompareIsNanF(cmp1, cmp2, cd, false);
+ }
+
+ void BranchTrueShortF(Label* target, CFRegister cc = FCC0);
+ void BranchFalseShortF(Label* target, CFRegister cc = FCC0);
+
+ void BranchTrueF(Label* target, CFRegister cc = FCC0);
+ void BranchFalseF(Label* target, CFRegister cc = FCC0);
+
+ static int InstrCountForLi64Bit(int64_t value);
+ inline void LiLower32BitHelper(Register rd, Operand j);
+ void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
+ li(rd, Operand(j), mode);
+ }
+ inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
+ li(rd, Operand(static_cast<int64_t>(j)), mode);
+ }
+ void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register dst, const StringConstantBase* string,
+ LiFlags mode = OPTIMIZE_SIZE);
+
+ void LoadFromConstantsTable(Register destination, int constant_index) final;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
+ void LoadRootRelative(Register destination, int32_t offset) final;
+
+ inline void Move(Register output, MemOperand operand) {
+ Ld_d(output, operand);
+ }
+
+ inline void GenPCRelativeJump(Register rd, int64_t offset);
+ inline void GenPCRelativeJumpAndLink(Register rd, int64_t offset);
+
+// Jump, Call, and Ret pseudo instructions implementing inter-working.
+#define COND_ARGS \
+ Condition cond = al, Register rj = zero_reg, \
+ const Operand &rk = Operand(zero_reg)
+
+ void Jump(Register target, COND_ARGS);
+ void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
+ void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
+  // Unlike li, this method stores the target in memory and then loads it
+  // into a register with Ld_d, so it can be used in the wasm jump table for
+  // concurrent patching.
+ void PatchAndJump(Address target);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
+ void Jump(const ExternalReference& reference);
+ void Call(Register target, COND_ARGS);
+ void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ COND_ARGS);
+ void Call(Label* target);
+
+  // Load the builtin given by the Smi in |builtin| into the same
+  // register.
+ void LoadEntryFromBuiltinIndex(Register builtin);
+ void LoadEntryFromBuiltin(Builtin builtin, Register destination);
+ MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
+
+ void CallBuiltinByIndex(Register builtin);
+ void CallBuiltin(Builtin builtin);
+
+ void LoadCodeObjectEntry(Register destination, Register code_object);
+ void CallCodeObject(Register code_object);
+
+ void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump);
+
+ // Generates an instruction sequence s.t. the return address points to the
+ // instruction following the call.
+ // The return address on the stack is used by frame iteration.
+ void StoreReturnAddressAndCall(Register target);
+
+ void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label* jump_deoptimization_entry_label);
+
+ void Ret(COND_ARGS);
+
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the sp register.
+ void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
+ const Operand& op = Operand(no_reg));
+
+ void Ld_d(Register rd, const MemOperand& rj);
+ void St_d(Register rd, const MemOperand& rj);
+
+ void Push(Handle<HeapObject> handle);
+ void Push(Smi smi);
+
+ void Push(Register src) {
+ Add_d(sp, sp, Operand(-kPointerSize));
+ St_d(src, MemOperand(sp, 0));
+ }
+
+ // Push two registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2) {
+ Sub_d(sp, sp, Operand(2 * kPointerSize));
+ St_d(src1, MemOperand(sp, 1 * kPointerSize));
+ St_d(src2, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push three registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3) {
+ Sub_d(sp, sp, Operand(3 * kPointerSize));
+ St_d(src1, MemOperand(sp, 2 * kPointerSize));
+ St_d(src2, MemOperand(sp, 1 * kPointerSize));
+ St_d(src3, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push four registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4) {
+ Sub_d(sp, sp, Operand(4 * kPointerSize));
+ St_d(src1, MemOperand(sp, 3 * kPointerSize));
+ St_d(src2, MemOperand(sp, 2 * kPointerSize));
+ St_d(src3, MemOperand(sp, 1 * kPointerSize));
+ St_d(src4, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push five registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4,
+ Register src5) {
+ Sub_d(sp, sp, Operand(5 * kPointerSize));
+ St_d(src1, MemOperand(sp, 4 * kPointerSize));
+ St_d(src2, MemOperand(sp, 3 * kPointerSize));
+ St_d(src3, MemOperand(sp, 2 * kPointerSize));
+ St_d(src4, MemOperand(sp, 1 * kPointerSize));
+ St_d(src5, MemOperand(sp, 0 * kPointerSize));
+ }
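+  // For illustration: after Push(a0, a1), a1 sits at MemOperand(sp, 0) and
+  // a0 one slot above it, so a later Pop(a0, a1) restores both registers and
+  // re-adjusts sp.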
+
+ enum PushArrayOrder { kNormal, kReverse };
+ void PushArray(Register array, Register size, Register scratch,
+ Register scratch2, PushArrayOrder order = kNormal);
+
+ void MaybeSaveRegisters(RegList registers);
+ void MaybeRestoreRegisters(RegList registers);
+
+ void CallEphemeronKeyBarrier(Register object, Operand offset,
+ SaveFPRegsMode fp_mode);
+
+ void CallRecordWriteStubSaveRegisters(
+ Register object, Operand offset,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+ void CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+
+ // For a given |object| and |offset|:
+ // - Move |object| to |dst_object|.
+ // - Compute the address of the slot pointed to by |offset| in |object| and
+ // write it to |dst_slot|.
+ // This method makes sure |object| and |offset| are allowed to overlap with
+ // the destination registers.
+ void MoveObjectAndSlot(Register dst_object, Register dst_slot,
+ Register object, Operand offset);
+
+ // Push multiple registers on the stack.
+ // Registers are saved in numerical order, with higher numbered registers
+ // saved in higher memory addresses.
+ void MultiPush(RegList regs);
+ void MultiPush(RegList regs1, RegList regs2);
+ void MultiPush(RegList regs1, RegList regs2, RegList regs3);
+ void MultiPushFPU(RegList regs);
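+  // For illustration, MultiPush(kJSCallerSaved) spills every JS caller-saved
+  // register in a single call, and MultiPop(kJSCallerSaved) reloads them in
+  // the opposite order.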
+
+  // Calculate how much stack space (in bytes) is required to store caller
+  // registers, excluding those specified in the arguments.
+ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg) const;
+
+  // Push caller-saved registers on the stack, and return the number of bytes
+  // the stack pointer was adjusted by.
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+  // Restore caller-saved registers from the stack, and return the number of
+  // bytes the stack pointer was adjusted by.
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+
+ void Pop(Register dst) {
+ Ld_d(dst, MemOperand(sp, 0));
+ Add_d(sp, sp, Operand(kPointerSize));
+ }
+
+ // Pop two registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2) {
+ DCHECK(src1 != src2);
+ Ld_d(src2, MemOperand(sp, 0 * kPointerSize));
+ Ld_d(src1, MemOperand(sp, 1 * kPointerSize));
+ Add_d(sp, sp, 2 * kPointerSize);
+ }
+
+ // Pop three registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3) {
+ Ld_d(src3, MemOperand(sp, 0 * kPointerSize));
+ Ld_d(src2, MemOperand(sp, 1 * kPointerSize));
+ Ld_d(src1, MemOperand(sp, 2 * kPointerSize));
+ Add_d(sp, sp, 3 * kPointerSize);
+ }
+
+  // Pops multiple values from the stack and loads them into the registers
+  // specified in regs. The pop order is the opposite of MultiPush.
+ void MultiPop(RegList regs);
+ void MultiPop(RegList regs1, RegList regs2);
+ void MultiPop(RegList regs1, RegList regs2, RegList regs3);
+
+ void MultiPopFPU(RegList regs);
+
+#define DEFINE_INSTRUCTION(instr) \
+ void instr(Register rd, Register rj, const Operand& rk); \
+ void instr(Register rd, Register rj, Register rk) { \
+ instr(rd, rj, Operand(rk)); \
+ } \
+ void instr(Register rj, Register rk, int32_t j) { instr(rj, rk, Operand(j)); }
+
+#define DEFINE_INSTRUCTION2(instr) \
+ void instr(Register rj, const Operand& rk); \
+ void instr(Register rj, Register rk) { instr(rj, Operand(rk)); } \
+ void instr(Register rj, int32_t j) { instr(rj, Operand(j)); }
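+  // For illustration, DEFINE_INSTRUCTION(Add_d) below declares three
+  // overloads, so a caller may write Add_d(a0, a1, Operand(8)),
+  // Add_d(a0, a1, a2), or Add_d(a0, a1, 8).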
+
+ DEFINE_INSTRUCTION(Add_w)
+ DEFINE_INSTRUCTION(Add_d)
+ DEFINE_INSTRUCTION(Div_w)
+ DEFINE_INSTRUCTION(Div_wu)
+ DEFINE_INSTRUCTION(Div_du)
+ DEFINE_INSTRUCTION(Mod_w)
+ DEFINE_INSTRUCTION(Mod_wu)
+ DEFINE_INSTRUCTION(Div_d)
+ DEFINE_INSTRUCTION(Sub_w)
+ DEFINE_INSTRUCTION(Sub_d)
+ DEFINE_INSTRUCTION(Mod_d)
+ DEFINE_INSTRUCTION(Mod_du)
+ DEFINE_INSTRUCTION(Mul_w)
+ DEFINE_INSTRUCTION(Mulh_w)
+ DEFINE_INSTRUCTION(Mulh_wu)
+ DEFINE_INSTRUCTION(Mul_d)
+ DEFINE_INSTRUCTION(Mulh_d)
+ DEFINE_INSTRUCTION2(Div_w)
+ DEFINE_INSTRUCTION2(Div_d)
+ DEFINE_INSTRUCTION2(Div_wu)
+ DEFINE_INSTRUCTION2(Div_du)
+
+ DEFINE_INSTRUCTION(And)
+ DEFINE_INSTRUCTION(Or)
+ DEFINE_INSTRUCTION(Xor)
+ DEFINE_INSTRUCTION(Nor)
+ DEFINE_INSTRUCTION2(Neg)
+ DEFINE_INSTRUCTION(Andn)
+ DEFINE_INSTRUCTION(Orn)
+
+ DEFINE_INSTRUCTION(Slt)
+ DEFINE_INSTRUCTION(Sltu)
+ DEFINE_INSTRUCTION(Slti)
+ DEFINE_INSTRUCTION(Sltiu)
+ DEFINE_INSTRUCTION(Sle)
+ DEFINE_INSTRUCTION(Sleu)
+ DEFINE_INSTRUCTION(Sgt)
+ DEFINE_INSTRUCTION(Sgtu)
+ DEFINE_INSTRUCTION(Sge)
+ DEFINE_INSTRUCTION(Sgeu)
+
+ DEFINE_INSTRUCTION(Rotr_w)
+ DEFINE_INSTRUCTION(Rotr_d)
+
+#undef DEFINE_INSTRUCTION
+#undef DEFINE_INSTRUCTION2
+
+ void SmiUntag(Register dst, const MemOperand& src);
+ void SmiUntag(Register dst, Register src) {
+ if (SmiValuesAre32Bits()) {
+ srai_d(dst, src, kSmiShift);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ srai_w(dst, src, kSmiShift);
+ }
+ }
+
+ void SmiUntag(Register reg) { SmiUntag(reg, reg); }
+
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+
+ // Before calling a C-function from generated code, align arguments on stack.
+  // After aligning the frame, non-register arguments must be stored on the
+  // stack, after the argument slots, using the CFunctionArgumentOperand()
+  // helper.
+ // The argument count assumes all arguments are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
+ Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments, Register scratch);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(ExternalReference function, int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
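+  // For illustration, a call passing two word-sized arguments could be
+  // sequenced as (scratch and ref stand for any free register and an
+  // already-built ExternalReference):
+  //   PrepareCallCFunction(2, scratch);  // align sp; scratch is clobbered
+  //   ...materialize the two arguments in a0 and a1...
+  //   CallCFunction(ref, 2);
+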
+ // See comments at the beginning of Builtins::Generate_CEntry.
+ inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
+ inline void PrepareCEntryFunction(const ExternalReference& ref) {
+ li(a1, ref);
+ }
+
+ void CheckPageFlag(const Register& object, int mask, Condition cc,
+ Label* condition_met);
+#undef COND_ARGS
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
+ DoubleRegister double_input, StubCallMode stub_mode);
+
+ // Conditional move.
+ void Movz(Register rd, Register rj, Register rk);
+ void Movn(Register rd, Register rj, Register rk);
+
+ void LoadZeroIfFPUCondition(Register dest, CFRegister = FCC0);
+ void LoadZeroIfNotFPUCondition(Register dest, CFRegister = FCC0);
+
+ void LoadZeroIfConditionNotZero(Register dest, Register condition);
+ void LoadZeroIfConditionZero(Register dest, Register condition);
+ void LoadZeroOnCondition(Register rd, Register rj, const Operand& rk,
+ Condition cond);
+
+ void Clz_w(Register rd, Register rj);
+ void Clz_d(Register rd, Register rj);
+ void Ctz_w(Register rd, Register rj);
+ void Ctz_d(Register rd, Register rj);
+ void Popcnt_w(Register rd, Register rj);
+ void Popcnt_d(Register rd, Register rj);
+
+ void ExtractBits(Register dest, Register source, Register pos, int size,
+ bool sign_extend = false);
+ void InsertBits(Register dest, Register source, Register pos, int size);
+
+  void Bstrins_w(Register rk, Register rj, uint16_t msbw, uint16_t lsbw);
+ void Bstrins_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw);
+ void Bstrpick_w(Register rk, Register rj, uint16_t msbw, uint16_t lsbw);
+ void Bstrpick_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw);
+ void Neg_s(FPURegister fd, FPURegister fj);
+ void Neg_d(FPURegister fd, FPURegister fk);
+
+ // Convert single to unsigned word.
+ void Trunc_uw_s(FPURegister fd, FPURegister fj, FPURegister scratch);
+ void Trunc_uw_s(Register rd, FPURegister fj, FPURegister scratch);
+
+ // Change endianness
+ void ByteSwapSigned(Register dest, Register src, int operand_size);
+ void ByteSwapUnsigned(Register dest, Register src, int operand_size);
+
+ void Ld_b(Register rd, const MemOperand& rj);
+ void Ld_bu(Register rd, const MemOperand& rj);
+ void St_b(Register rd, const MemOperand& rj);
+
+ void Ld_h(Register rd, const MemOperand& rj);
+ void Ld_hu(Register rd, const MemOperand& rj);
+ void St_h(Register rd, const MemOperand& rj);
+
+ void Ld_w(Register rd, const MemOperand& rj);
+ void Ld_wu(Register rd, const MemOperand& rj);
+ void St_w(Register rd, const MemOperand& rj);
+
+ void Fld_s(FPURegister fd, const MemOperand& src);
+ void Fst_s(FPURegister fj, const MemOperand& dst);
+
+ void Fld_d(FPURegister fd, const MemOperand& src);
+ void Fst_d(FPURegister fj, const MemOperand& dst);
+
+ void Ll_w(Register rd, const MemOperand& rj);
+ void Sc_w(Register rd, const MemOperand& rj);
+
+ void Ll_d(Register rd, const MemOperand& rj);
+ void Sc_d(Register rd, const MemOperand& rj);
+
+ // These functions assume (and assert) that src1!=src2. It is permitted
+ // for the result to alias either input register.
+ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+
+ // Generate out-of-line cases for the macros above.
+ void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+
+ bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
+
+ void mov(Register rd, Register rj) { or_(rd, rj, zero_reg); }
+
+ inline void Move(Register dst, Handle<HeapObject> handle) { li(dst, handle); }
+ inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); }
+
+ inline void Move(Register dst, Register src) {
+ if (dst != src) {
+ mov(dst, src);
+ }
+ }
+
+ inline void FmoveLow(Register dst_low, FPURegister src) {
+ movfr2gr_s(dst_low, src);
+ }
+
+ void FmoveLow(FPURegister dst, Register src_low);
+
+ inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }
+
+ inline void Move_d(FPURegister dst, FPURegister src) {
+ if (dst != src) {
+ fmov_d(dst, src);
+ }
+ }
+
+ inline void Move_s(FPURegister dst, FPURegister src) {
+ if (dst != src) {
+ fmov_s(dst, src);
+ }
+ }
+
+ void Move(FPURegister dst, float imm) { Move(dst, bit_cast<uint32_t>(imm)); }
+ void Move(FPURegister dst, double imm) { Move(dst, bit_cast<uint64_t>(imm)); }
+ void Move(FPURegister dst, uint32_t src);
+ void Move(FPURegister dst, uint64_t src);
+
+ // AddOverflow_d sets overflow register to a negative value if
+  // overflow occurred; otherwise it is zero or positive.
+ void AddOverflow_d(Register dst, Register left, const Operand& right,
+ Register overflow);
+ // SubOverflow_d sets overflow register to a negative value if
+  // overflow occurred; otherwise it is zero or positive.
+ void SubOverflow_d(Register dst, Register left, const Operand& right,
+ Register overflow);
+  // MulOverflow_w sets the overflow register to zero if no overflow occurred.
+ void MulOverflow_w(Register dst, Register left, const Operand& right,
+ Register overflow);
+
+ // TODO(LOONG_dev): LOONG64 Remove this constant
+ // Number of instructions needed for calculation of switch table entry address
+ static const int kSwitchTablePrologueSize = 5;
+
+  // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
+  // functor/function with a 'Label* func(size_t index)' declaration.
+ template <typename Func>
+ void GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction);
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination, RootIndex index) final;
+ void LoadRoot(Register destination, RootIndex index, Condition cond,
+ Register src1, const Operand& src2);
+
+ void LoadMap(Register destination, Register object);
+
+  // If the value is a NaN, canonicalize the value; otherwise, do nothing.
+ void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
+
+ // ---------------------------------------------------------------------------
+ // FPU macros. These do not handle special cases like NaN or +- inf.
+
+ // Convert unsigned word to double.
+ void Ffint_d_uw(FPURegister fd, FPURegister fj);
+ void Ffint_d_uw(FPURegister fd, Register rj);
+
+ // Convert unsigned long to double.
+ void Ffint_d_ul(FPURegister fd, FPURegister fj);
+ void Ffint_d_ul(FPURegister fd, Register rj);
+
+ // Convert unsigned word to float.
+ void Ffint_s_uw(FPURegister fd, FPURegister fj);
+ void Ffint_s_uw(FPURegister fd, Register rj);
+
+ // Convert unsigned long to float.
+ void Ffint_s_ul(FPURegister fd, FPURegister fj);
+ void Ffint_s_ul(FPURegister fd, Register rj);
+
+ // Convert double to unsigned word.
+ void Ftintrz_uw_d(FPURegister fd, FPURegister fj, FPURegister scratch);
+ void Ftintrz_uw_d(Register rd, FPURegister fj, FPURegister scratch);
+
+ // Convert single to unsigned word.
+ void Ftintrz_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
+ void Ftintrz_uw_s(Register rd, FPURegister fs, FPURegister scratch);
+
+ // Convert double to unsigned long.
+ void Ftintrz_ul_d(FPURegister fd, FPURegister fj, FPURegister scratch,
+ Register result = no_reg);
+ void Ftintrz_ul_d(Register rd, FPURegister fj, FPURegister scratch,
+ Register result = no_reg);
+
+ // Convert single to unsigned long.
+ void Ftintrz_ul_s(FPURegister fd, FPURegister fj, FPURegister scratch,
+ Register result = no_reg);
+ void Ftintrz_ul_s(Register rd, FPURegister fj, FPURegister scratch,
+ Register result = no_reg);
+
+ // Round double functions
+ void Trunc_d(FPURegister fd, FPURegister fj);
+ void Round_d(FPURegister fd, FPURegister fj);
+ void Floor_d(FPURegister fd, FPURegister fj);
+ void Ceil_d(FPURegister fd, FPURegister fj);
+
+ // Round float functions
+ void Trunc_s(FPURegister fd, FPURegister fj);
+ void Round_s(FPURegister fd, FPURegister fj);
+ void Floor_s(FPURegister fd, FPURegister fj);
+ void Ceil_s(FPURegister fd, FPURegister fj);
+
+  // Jump if the register contains a smi.
+ void JumpIfSmi(Register value, Label* smi_label);
+
+ void JumpIfEqual(Register a, int32_t b, Label* dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(b));
+ Branch(dest, eq, a, Operand(scratch));
+ }
+
+ void JumpIfLessThan(Register a, int32_t b, Label* dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(b));
+ Branch(dest, lt, a, Operand(scratch));
+ }
+
+ // Push a standard frame, consisting of ra, fp, context and JS function.
+ void PushStandardFrame(Register function_reg);
+
+ // Get the actual activation frame alignment for target environment.
+ static int ActivationFrameAlignment();
+
+ // Load Scaled Address instructions. Parameter sa (shift argument) must be
+ // between [1, 31] (inclusive). The scratch register may be clobbered.
+ void Alsl_w(Register rd, Register rj, Register rk, uint8_t sa,
+ Register scratch = t7);
+ void Alsl_d(Register rd, Register rj, Register rk, uint8_t sa,
+ Register scratch = t7);
+
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(Register dst);
+
+ // Control-flow integrity:
+
+ // Define a function entrypoint. This doesn't emit any code for this
+ // architecture, as control-flow integrity is not supported for it.
+ void CodeEntry() {}
+ // Define an exception handler.
+ void ExceptionHandler() {}
+ // Define an exception handler and bind a label.
+ void BindExceptionHandler(Label* label) { bind(label); }
+
+ protected:
+ inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch);
+ inline int32_t GetOffset(Label* L, OffsetSize bits);
+
+ private:
+ bool has_double_zero_reg_set_ = false;
+
+ // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Jumps to 'done' if
+  // it succeeds; otherwise (when the result is saturated) it falls through.
+  // On return, 'result' either holds the answer or is clobbered.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
+
+ bool BranchShortOrFallback(Label* L, Condition cond, Register rj,
+ const Operand& rk, bool need_link);
+
+ // f32 or f64
+ void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
+ CFRegister cd, bool f32 = true);
+
+ void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd,
+ bool f32 = true);
+
+ void CallCFunctionHelper(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
+ void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode);
+
+ void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode);
+
+ // Push a fixed frame, consisting of ra, fp.
+ void PushCommonFrame(Register marker_reg = no_reg);
+};
+
+// MacroAssembler implements a collection of frequently used macros.
+class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
+ public:
+ using TurboAssembler::TurboAssembler;
+
+ // It assumes that the arguments are located below the stack pointer.
+ // argc is the number of arguments not including the receiver.
+ // TODO(LOONG_dev): LOONG64: Remove this function once we stick with the
+ // reversed arguments order.
+ void LoadReceiver(Register dest, Register argc) {
+ Ld_d(dest, MemOperand(sp, 0));
+ }
+
+ void StoreReceiver(Register rec, Register argc, Register scratch) {
+ St_d(rec, MemOperand(sp, 0));
+ }
+
+ bool IsNear(Label* L, Condition cond, int rs_reg);
+
+ // Swap two registers. If the scratch register is omitted then a slightly
+ // less efficient form using xor instead of mov is emitted.
+ void Swap(Register reg1, Register reg2, Register scratch = no_reg);
+
+ void PushRoot(RootIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Push(scratch);
+ }
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Branch(if_equal, eq, with, Operand(scratch));
+ }
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Branch(if_not_equal, ne, with, Operand(scratch));
+ }
+
+ // Checks if value is in range [lower_limit, higher_limit] using a single
+ // comparison.
+ void JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Label* on_in_range);
+
+ // ---------------------------------------------------------------------------
+ // GC Support
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object, int offset, Register value, RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
+
+ // For a given |object| notify the garbage collector that the slot at |offset|
+ // has been written. |value| is the object being stored.
+ void RecordWrite(
+ Register object, Operand offset, Register value, RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
+
+ // ---------------------------------------------------------------------------
+ // Pseudo-instructions.
+
+ // Convert double to unsigned long.
+ void Ftintrz_l_ud(FPURegister fd, FPURegister fj, FPURegister scratch);
+
+ void Ftintrz_l_d(FPURegister fd, FPURegister fj);
+ void Ftintrne_l_d(FPURegister fd, FPURegister fj);
+ void Ftintrm_l_d(FPURegister fd, FPURegister fj);
+ void Ftintrp_l_d(FPURegister fd, FPURegister fj);
+
+ void Ftintrz_w_d(FPURegister fd, FPURegister fj);
+ void Ftintrne_w_d(FPURegister fd, FPURegister fj);
+ void Ftintrm_w_d(FPURegister fd, FPURegister fj);
+ void Ftintrp_w_d(FPURegister fd, FPURegister fj);
+
+ void Madd_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
+ void Madd_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
+ void Msub_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
+ void Msub_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
+
+ // Enter exit frame.
+ // argc - argument count to be dropped by LeaveExitFrame.
+ // save_doubles - saves FPU registers on stack, currently disabled.
+ // stack_space - extra stack space.
+ void EnterExitFrame(bool save_doubles, int stack_space = 0,
+ StackFrame::Type frame_type = StackFrame::EXIT);
+
+ // Leave the current exit frame.
+ void LeaveExitFrame(bool save_doubles, Register arg_count,
+ bool do_return = NO_EMIT_RETURN,
+ bool argument_count_is_length = false);
+
+ // Make sure the stack is aligned. Only emits code in debug mode.
+ void AssertStackIsAligned();
+
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
+ }
+
+ void LoadNativeContextSlot(Register dst, int index);
+
+  // Load the initial map from the global function. The registers function
+  // and map can be the same; in that case, function is overwritten.
+ void LoadGlobalFunctionInitialMap(Register function, Register map,
+ Register scratch);
+
+ // -------------------------------------------------------------------------
+ // JavaScript invokes.
+
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeFunctionCode(Register function, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count, InvokeType type);
+
+ // On function call, call into the debugger.
+ void CallDebugOnFunctionCall(Register fun, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count);
+
+ // Invoke the JavaScript function in the given register. Changes the
+ // current context to the context in the function before invoking.
+ void InvokeFunctionWithNewTarget(Register function, Register new_target,
+ Register actual_parameter_count,
+ InvokeType type);
+ void InvokeFunction(Register function, Register expected_parameter_count,
+ Register actual_parameter_count, InvokeType type);
+
+ // Exception handling.
+
+ // Push a new stack handler and link into stack handler chain.
+ void PushStackHandler();
+
+ // Unlink the stack handler on top of the stack from the stack handler chain.
+ // Must preserve the result register.
+ void PopStackHandler();
+
+ // -------------------------------------------------------------------------
+ // Support functions.
+
+ void GetObjectType(Register function, Register map, Register type_reg);
+
+ void GetInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit, Register range);
+
+ // -------------------------------------------------------------------------
+ // Runtime calls.
+
+ // Call a runtime routine.
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ }
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid);
+
+ // Jump to the builtin routine.
+ void JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame = false);
+
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(Address entry);
+
+ // ---------------------------------------------------------------------------
+ // In-place weak references.
+ void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
+
+ // -------------------------------------------------------------------------
+ // StatsCounter support.
+
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitIncrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitDecrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+
+ // -------------------------------------------------------------------------
+ // Stack limit utilities
+
+ enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+ void LoadStackLimit(Register destination, StackLimitKind kind);
+ void StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2, Label* stack_overflow);
+
+ // ---------------------------------------------------------------------------
+ // Smi utilities.
+
+ void SmiTag(Register dst, Register src) {
+ STATIC_ASSERT(kSmiTag == 0);
+ if (SmiValuesAre32Bits()) {
+ slli_d(dst, src, 32);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ add_w(dst, src, src);
+ }
+ }
+
+ void SmiTag(Register reg) { SmiTag(reg, reg); }
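+  // For illustration, SmiTag followed by SmiUntag on the same register is the
+  // identity for any value that fits in a Smi, whether Smis are 31 or 32 bits
+  // wide.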
+
+  // Left-shift the int32 equivalent of the Smi by |scale| bits.
+ void SmiScale(Register dst, Register src, int scale) {
+ if (SmiValuesAre32Bits()) {
+ // The int portion is upper 32-bits of 64-bit word.
+ srai_d(dst, src, kSmiShift - scale);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK_GE(scale, kSmiTagSize);
+ slli_w(dst, src, scale - kSmiTagSize);
+ }
+ }
+
+ // Test if the register contains a smi.
+ inline void SmiTst(Register value, Register scratch) {
+ And(scratch, value, Operand(kSmiTagMask));
+ }
+
+ // Jump if the register contains a non-smi.
+ void JumpIfNotSmi(Register value, Label* not_smi_label);
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
+
+ // Abort execution if argument is not a Constructor, enabled via --debug-code.
+ void AssertConstructor(Register object);
+
+ // Abort execution if argument is not a JSFunction, enabled via --debug-code.
+ void AssertFunction(Register object);
+
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
+ // Abort execution if argument is not a JSGeneratorObject (or subclass),
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
+ template <typename Field>
+ void DecodeField(Register dst, Register src) {
+ Bstrpick_d(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
+ }
+
+ template <typename Field>
+ void DecodeField(Register reg) {
+ DecodeField<Field>(reg, reg);
+ }
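+  // For illustration, given a hypothetical bitfield type
+  //   using MyField = base::BitField<int, 3, 4>;
+  // DecodeField<MyField>(dst, src) extracts bits [3, 6] of src into dst.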
+
+ private:
+ // Helper functions for generating invokes.
+ void InvokePrologue(Register expected_parameter_count,
+ Register actual_parameter_count, Label* done,
+ InvokeType type);
+
+ friend class CommonFrame;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
+};
+
+template <typename Func>
+void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction) {
+ UseScratchRegisterScope scope(this);
+ Register scratch = scope.Acquire();
+ BlockTrampolinePoolFor((3 + case_count) * kInstrSize);
+
+ pcaddi(scratch, 3);
+ alsl_d(scratch, index, scratch, kInstrSizeLog2);
+ jirl(zero_reg, scratch, 0);
+ for (size_t index = 0; index < case_count; ++index) {
+ b(GetLabelFunction(index));
+ }
+}
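+// For illustration, a caller could bind one label per case, e.g.:
+//   Label cases[3];
+//   tasm->GenerateSwitchTable(index, 3,
+//                             [&cases](size_t i) { return &cases[i]; });
+// where tasm is a TurboAssembler* and index holds the zero-based case index.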
+
+#define ACCESS_MASM(masm) masm->
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
diff --git a/deps/v8/src/codegen/loong64/register-loong64.h b/deps/v8/src/codegen/loong64/register-loong64.h
new file mode 100644
index 0000000000..7d9d88c1f0
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/register-loong64.h
@@ -0,0 +1,288 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
+#define V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
+
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
+
+namespace v8 {
+namespace internal {
+
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(zero_reg) V(ra) V(tp) V(sp) \
+ V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \
+ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) V(t8) \
+ V(x_reg) V(fp) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) \
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \
+ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s7) V(s8)
+
+#define DOUBLE_REGISTERS(V) \
+ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
+ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
+ V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
+ V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS(V) \
+ V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
+ V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
+ V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
+ V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
+ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) V(f16) \
+ V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23)
+// clang-format on
+
+// Note that the bit values must match those used in actual instruction
+// encoding.
+const int kNumRegs = 32;
+
+const RegList kJSCallerSaved = 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7 | // a3
+ 1 << 8 | // a4
+ 1 << 9 | // a5
+ 1 << 10 | // a6
+ 1 << 11 | // a7
+ 1 << 12 | // t0
+ 1 << 13 | // t1
+ 1 << 14 | // t2
+ 1 << 15 | // t3
+ 1 << 16 | // t4
+ 1 << 17 | // t5
+ 1 << 20; // t8
+
+const int kNumJSCallerSaved = 15;
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+const RegList kCalleeSaved = 1 << 22 | // fp
+ 1 << 23 | // s0
+ 1 << 24 | // s1
+ 1 << 25 | // s2
+ 1 << 26 | // s3
+ 1 << 27 | // s4
+ 1 << 28 | // s5
+ 1 << 29 | // s6 (roots in Javascript code)
+ 1 << 30 | // s7 (cp in Javascript code)
+ 1 << 31; // s8
+
+const int kNumCalleeSaved = 10;
+
+const RegList kCalleeSavedFPU = 1 << 24 | // f24
+ 1 << 25 | // f25
+ 1 << 26 | // f26
+ 1 << 27 | // f27
+ 1 << 28 | // f28
+ 1 << 29 | // f29
+ 1 << 30 | // f30
+ 1 << 31; // f31
+
+const int kNumCalleeSavedFPU = 8;
+
+const RegList kCallerSavedFPU = 1 << 0 | // f0
+ 1 << 1 | // f1
+ 1 << 2 | // f2
+ 1 << 3 | // f3
+ 1 << 4 | // f4
+ 1 << 5 | // f5
+ 1 << 6 | // f6
+ 1 << 7 | // f7
+ 1 << 8 | // f8
+ 1 << 9 | // f9
+ 1 << 10 | // f10
+ 1 << 11 | // f11
+ 1 << 12 | // f12
+ 1 << 13 | // f13
+ 1 << 14 | // f14
+ 1 << 15 | // f15
+ 1 << 16 | // f16
+ 1 << 17 | // f17
+ 1 << 18 | // f18
+ 1 << 19 | // f19
+ 1 << 20 | // f20
+ 1 << 21 | // f21
+ 1 << 22 | // f22
+ 1 << 23; // f23
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and FPURegister.
+
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
+
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
+ static constexpr int kMantissaOffset = 0;
+ static constexpr int kExponentOffset = 4;
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
+};
+
+// s7: context register
+// s3: scratch register
+// s4: scratch register 2
+#define DECLARE_REGISTER(R) \
+ constexpr Register R = Register::from_code(kRegCode_##R);
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+
+constexpr Register no_reg = Register::no_reg();
+
+int ToNumber(Register reg);
+
+Register ToRegister(int num);
+
+// Returns the number of padding slots needed for stack pointer alignment.
+constexpr int ArgumentPaddingSlots(int argument_count) {
+ // No argument padding required.
+ return 0;
+}
+
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;
+
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kDoubleAfterLast
+};
+
+// FPURegister register.
+class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
+ public:
+ FPURegister low() const { return FPURegister::from_code(code()); }
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr FPURegister(int code) : RegisterBase(code) {}
+};
+
+// Condition Flag Register
+enum CFRegister { FCC0, FCC1, FCC2, FCC3, FCC4, FCC5, FCC6, FCC7 };
+
+using FloatRegister = FPURegister;
+
+using DoubleRegister = FPURegister;
+
+using Simd128Register = FPURegister;
+
+#define DECLARE_DOUBLE_REGISTER(R) \
+ constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R);
+DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
+#undef DECLARE_DOUBLE_REGISTER
+
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
+
+// Register aliases.
+// cp is assumed to be a callee saved register.
+constexpr Register kRootRegister = s6;
+constexpr Register cp = s7;
+constexpr Register kScratchReg = s3;
+constexpr Register kScratchReg2 = s4;
+constexpr DoubleRegister kScratchDoubleReg = f30;
+constexpr DoubleRegister kScratchDoubleReg1 = f30;
+constexpr DoubleRegister kScratchDoubleReg2 = f31;
+// FPU zero reg is often used to hold 0.0, but it's not hardwired to 0.0.
+constexpr DoubleRegister kDoubleRegZero = f29;
+
+struct FPUControlRegister {
+ bool is_valid() const { return (reg_code >> 2) == 0; }
+ bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
+ int code() const {
+ DCHECK(is_valid());
+ return reg_code;
+ }
+ int bit() const {
+ DCHECK(is_valid());
+ return 1 << reg_code;
+ }
+ void setcode(int f) {
+ reg_code = f;
+ DCHECK(is_valid());
+ }
+ // Unfortunately we can't make this private in a struct.
+ int reg_code;
+};
+
+constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister};
+constexpr FPUControlRegister FCSR = {kFCSRRegister};
+constexpr FPUControlRegister FCSR0 = {kFCSRRegister};
+constexpr FPUControlRegister FCSR1 = {kFCSRRegister + 1};
+constexpr FPUControlRegister FCSR2 = {kFCSRRegister + 2};
+constexpr FPUControlRegister FCSR3 = {kFCSRRegister + 3};
+
+// Define {RegisterName} methods for the register types.
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
+DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS)
+
+// Give alias names to registers for calling conventions.
+constexpr Register kReturnRegister0 = a0;
+constexpr Register kReturnRegister1 = a1;
+constexpr Register kReturnRegister2 = a2;
+constexpr Register kJSFunctionRegister = a1;
+constexpr Register kContextRegister = s7;
+constexpr Register kAllocateSizeRegister = a0;
+constexpr Register kInterpreterAccumulatorRegister = a0;
+constexpr Register kInterpreterBytecodeOffsetRegister = t0;
+constexpr Register kInterpreterBytecodeArrayRegister = t1;
+constexpr Register kInterpreterDispatchTableRegister = t2;
+
+constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallCodeStartRegister = a2;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = a3;
+constexpr Register kJavaScriptCallExtraArg1Register = a2;
+
+constexpr Register kOffHeapTrampolineRegister = t7;
+constexpr Register kRuntimeCallFunctionRegister = a1;
+constexpr Register kRuntimeCallArgCountRegister = a0;
+constexpr Register kRuntimeCallArgvRegister = a2;
+constexpr Register kWasmInstanceRegister = a0;
+constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
+
+constexpr DoubleRegister kFPReturnRegister0 = f0;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
diff --git a/deps/v8/src/codegen/macro-assembler.h b/deps/v8/src/codegen/macro-assembler.h
index cfa7a4d341..02fa1cf3f9 100644
--- a/deps/v8/src/codegen/macro-assembler.h
+++ b/deps/v8/src/codegen/macro-assembler.h
@@ -57,6 +57,9 @@ enum class SmiCheck { kOmit, kInline };
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/constants-mips64.h"
#include "src/codegen/mips64/macro-assembler-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/loong64/macro-assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/constants-s390.h"
#include "src/codegen/s390/macro-assembler-s390.h"
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index 0d5a8710e5..dde08710fb 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -878,7 +878,6 @@ int Assembler::target_at(int pos, bool is_internal) {
}
}
}
- return 0;
}
static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 9c1af1cb05..32f85c6ec2 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -1398,8 +1398,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
BlockGrowBufferScope block_growbuffer(this);
int offset = pc_offset();
Address address = j.immediate();
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, address));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, address);
Handle<HeapObject> object(reinterpret_cast<Address*>(address));
int32_t immediate = object->ptr();
RecordRelocInfo(j.rmode(), immediate);
@@ -3279,7 +3278,6 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
return BranchShortHelper(0, L, cond, rs, rt, bdslot);
}
}
- return false;
}
void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
@@ -3631,7 +3629,6 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
}
}
- return false;
}
void TurboAssembler::LoadFromConstantsTable(Register destination,
@@ -4987,15 +4984,19 @@ void MacroAssembler::AssertStackIsAligned() {
}
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
- Register scratch, BranchDelaySlot bd) {
+ BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
- Register scratch, BranchDelaySlot bd) {
+ BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}
@@ -5519,10 +5520,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- li(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index ffa5f5820d..f467f83bd0 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -795,7 +795,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Jump the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label,
- Register scratch = kScratchReg, BranchDelaySlot bd = PROTECT);
+ BranchDelaySlot bd = PROTECT);
void JumpIfEqual(Register a, int32_t b, Label* dest) {
li(kScratchReg, Operand(b));
@@ -817,8 +817,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -1108,7 +1106,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Jump if the register contains a non-smi.
- void JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch = at,
+ void JumpIfNotSmi(Register value, Label* not_smi_label,
BranchDelaySlot bd = PROTECT);
// Abort execution if argument is a smi, enabled via --debug-code.
diff --git a/deps/v8/src/codegen/mips/register-mips.h b/deps/v8/src/codegen/mips/register-mips.h
index 95164a86c1..7fd259bf9b 100644
--- a/deps/v8/src/codegen/mips/register-mips.h
+++ b/deps/v8/src/codegen/mips/register-mips.h
@@ -362,7 +362,6 @@ constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
-constexpr Register kSpeculationPoisonRegister = t3;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t4;
constexpr Register kInterpreterBytecodeArrayRegister = t5;
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 708cf4baa6..5ceb69e861 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -1918,8 +1918,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
BlockGrowBufferScope block_growbuffer(this);
int offset = pc_offset();
Address address = j.immediate();
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, address));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, address);
Handle<HeapObject> object(reinterpret_cast<Address*>(address));
int64_t immediate = object->ptr();
RecordRelocInfo(j.rmode(), immediate);
@@ -3922,7 +3921,6 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
return BranchShortHelper(0, L, cond, rs, rt, bdslot);
}
}
- return false;
}
void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
@@ -4274,7 +4272,6 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
}
}
- return false;
}
void TurboAssembler::LoadFromConstantsTable(Register destination,
@@ -5532,15 +5529,19 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
- Register scratch, BranchDelaySlot bd) {
+ BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
- Register scratch, BranchDelaySlot bd) {
+ BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}
@@ -6059,10 +6060,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- li(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index a4991bcb1e..a0ebe35a93 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -805,7 +805,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MSARoundD(MSARegister dst, MSARegister src, FPURoundingMode mode);
// Jump the register contains a smi.
- void JumpIfSmi(Register value, Label* smi_label, Register scratch = at,
+ void JumpIfSmi(Register value, Label* smi_label,
BranchDelaySlot bd = PROTECT);
void JumpIfEqual(Register a, int32_t b, Label* dest) {
@@ -836,8 +836,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -1182,7 +1180,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Jump if the register contains a non-smi.
- void JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch = at,
+ void JumpIfNotSmi(Register value, Label* not_smi_label,
BranchDelaySlot bd = PROTECT);
// Abort execution if argument is a smi, enabled via --debug-code.
diff --git a/deps/v8/src/codegen/mips64/register-mips64.h b/deps/v8/src/codegen/mips64/register-mips64.h
index 51b03aba1f..1fbe3ec7ac 100644
--- a/deps/v8/src/codegen/mips64/register-mips64.h
+++ b/deps/v8/src/codegen/mips64/register-mips64.h
@@ -373,7 +373,6 @@ constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
-constexpr Register kSpeculationPoisonRegister = t3;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t0;
constexpr Register kInterpreterBytecodeArrayRegister = t1;
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index e3ca07a3c9..d0c4ed52e6 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -63,34 +63,10 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
ConfigureFlags();
}
-#ifdef DEBUG
-bool OptimizedCompilationInfo::FlagSetIsValid(Flag flag) const {
- switch (flag) {
- case kPoisonRegisterArguments:
- return untrusted_code_mitigations();
- default:
- return true;
- }
- UNREACHABLE();
-}
-
-bool OptimizedCompilationInfo::FlagGetIsValid(Flag flag) const {
- switch (flag) {
- case kPoisonRegisterArguments:
- if (!GetFlag(kPoisonRegisterArguments)) return true;
- return untrusted_code_mitigations() && called_with_code_start_register();
- default:
- return true;
- }
- UNREACHABLE();
-}
-#endif // DEBUG
-
void OptimizedCompilationInfo::ConfigureFlags() {
- if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations();
if (FLAG_turbo_inline_js_wasm_calls) set_inline_js_wasm_calls();
- if (!is_osr() && (IsTurboprop() || FLAG_concurrent_inlining)) {
+ if (IsTurboprop() || FLAG_concurrent_inlining) {
set_concurrent_inlining();
}
@@ -104,7 +80,6 @@ void OptimizedCompilationInfo::ConfigureFlags() {
case CodeKind::TURBOPROP:
set_called_with_code_start_register();
set_switch_jump_table();
- if (FLAG_untrusted_code_mitigations) set_poison_register_arguments();
// TODO(yangguo): Disable this in case of debugging for crbug.com/826613
if (FLAG_analyze_environment_liveness) set_analyze_environment_liveness();
break;
@@ -123,8 +98,15 @@ void OptimizedCompilationInfo::ConfigureFlags() {
case CodeKind::WASM_TO_CAPI_FUNCTION:
set_switch_jump_table();
break;
- default:
+ case CodeKind::C_WASM_ENTRY:
+ case CodeKind::JS_TO_JS_FUNCTION:
+ case CodeKind::JS_TO_WASM_FUNCTION:
+ case CodeKind::WASM_TO_JS_FUNCTION:
break;
+ case CodeKind::BASELINE:
+ case CodeKind::INTERPRETED_FUNCTION:
+ case CodeKind::REGEXP:
+ UNREACHABLE();
}
}
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index b7ed0d29c4..d92964c796 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -58,21 +58,19 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
V(SourcePositions, source_positions, 4) \
V(BailoutOnUninitialized, bailout_on_uninitialized, 5) \
V(LoopPeeling, loop_peeling, 6) \
- V(UntrustedCodeMitigations, untrusted_code_mitigations, 7) \
- V(SwitchJumpTable, switch_jump_table, 8) \
- V(CalledWithCodeStartRegister, called_with_code_start_register, 9) \
- V(PoisonRegisterArguments, poison_register_arguments, 10) \
- V(AllocationFolding, allocation_folding, 11) \
- V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 12) \
- V(TraceTurboJson, trace_turbo_json, 13) \
- V(TraceTurboGraph, trace_turbo_graph, 14) \
- V(TraceTurboScheduled, trace_turbo_scheduled, 15) \
- V(TraceTurboAllocation, trace_turbo_allocation, 16) \
- V(TraceHeapBroker, trace_heap_broker, 17) \
- V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 18) \
- V(ConcurrentInlining, concurrent_inlining, 19) \
- V(DiscardResultForTesting, discard_result_for_testing, 20) \
- V(InlineJSWasmCalls, inline_js_wasm_calls, 21)
+ V(SwitchJumpTable, switch_jump_table, 7) \
+ V(CalledWithCodeStartRegister, called_with_code_start_register, 8) \
+ V(AllocationFolding, allocation_folding, 9) \
+ V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 10) \
+ V(TraceTurboJson, trace_turbo_json, 11) \
+ V(TraceTurboGraph, trace_turbo_graph, 12) \
+ V(TraceTurboScheduled, trace_turbo_scheduled, 13) \
+ V(TraceTurboAllocation, trace_turbo_allocation, 14) \
+ V(TraceHeapBroker, trace_heap_broker, 15) \
+ V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 16) \
+ V(ConcurrentInlining, concurrent_inlining, 17) \
+ V(DiscardResultForTesting, discard_result_for_testing, 18) \
+ V(InlineJSWasmCalls, inline_js_wasm_calls, 19)
enum Flag {
#define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit,
@@ -82,7 +80,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
#define DEF_GETTER(Camel, Lower, Bit) \
bool Lower() const { \
- DCHECK(FlagGetIsValid(k##Camel)); \
return GetFlag(k##Camel); \
}
FLAGS(DEF_GETTER)
@@ -90,17 +87,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
#define DEF_SETTER(Camel, Lower, Bit) \
void set_##Lower() { \
- DCHECK(FlagSetIsValid(k##Camel)); \
SetFlag(k##Camel); \
}
FLAGS(DEF_SETTER)
#undef DEF_SETTER
-#ifdef DEBUG
- bool FlagGetIsValid(Flag flag) const;
- bool FlagSetIsValid(Flag flag) const;
-#endif // DEBUG
-
// Construct a compilation info for optimized compilation.
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
@@ -141,13 +132,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
}
compiler::NodeObserver* node_observer() const { return node_observer_; }
- void SetPoisoningMitigationLevel(PoisoningMitigationLevel poisoning_level) {
- poisoning_level_ = poisoning_level;
- }
- PoisoningMitigationLevel GetPoisoningMitigationLevel() const {
- return poisoning_level_;
- }
-
// Code getters and setters.
void SetCode(Handle<Code> code);
@@ -269,8 +253,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// Compilation flags.
unsigned flags_ = 0;
- PoisoningMitigationLevel poisoning_level_ =
- PoisoningMitigationLevel::kDontPoison;
const CodeKind code_kind_;
Builtin builtin_ = Builtin::kNoBuiltinId;
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 2c568b3f3f..3e154e4c29 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -187,13 +187,13 @@ Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
return result;
}
-MemOperand::MemOperand(Register rn, int32_t offset)
+MemOperand::MemOperand(Register rn, int64_t offset)
: ra_(rn), offset_(offset), rb_(no_reg) {}
MemOperand::MemOperand(Register ra, Register rb)
: ra_(ra), offset_(0), rb_(rb) {}
-MemOperand::MemOperand(Register ra, Register rb, int32_t offset)
+MemOperand::MemOperand(Register ra, Register rb, int64_t offset)
: ra_(ra), offset_(offset), rb_(rb) {}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
@@ -303,7 +303,6 @@ Condition Assembler::GetCondition(Instr instr) {
default:
UNIMPLEMENTED();
}
- return al;
}
bool Assembler::IsLis(Instr instr) {
@@ -1621,8 +1620,8 @@ void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
}
void Assembler::fcpsgn(const DoubleRegister frt, const DoubleRegister fra,
- const DoubleRegister frc, RCBit rc) {
- emit(EXT4 | FCPSGN | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
+ const DoubleRegister frb, RCBit rc) {
+ emit(EXT4 | FCPSGN | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
rc);
}
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index f46090cec5..2b5c156204 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -133,13 +133,13 @@ class V8_EXPORT_PRIVATE Operand {
// Alternatively we can have a 16bit signed value immediate
class V8_EXPORT_PRIVATE MemOperand {
public:
- explicit MemOperand(Register rn, int32_t offset = 0);
+ explicit MemOperand(Register rn, int64_t offset = 0);
explicit MemOperand(Register ra, Register rb);
- explicit MemOperand(Register ra, Register rb, int32_t offset);
+ explicit MemOperand(Register ra, Register rb, int64_t offset);
- int32_t offset() const { return offset_; }
+ int64_t offset() const { return offset_; }
// PowerPC - base register
Register ra() const { return ra_; }
@@ -148,7 +148,7 @@ class V8_EXPORT_PRIVATE MemOperand {
private:
Register ra_; // base
- int32_t offset_; // offset
+ int64_t offset_; // offset
Register rb_; // index
friend class Assembler;
@@ -373,6 +373,11 @@ class Assembler : public AssemblerBase {
x_form(instr_name, cr.code() * B2, src1.code(), src2.code(), LeaveRC); \
}
+#define DECLARE_PPC_X_INSTRUCTIONS_G_FORM(name, instr_name, instr_value) \
+ inline void name(const Register dst, const Register src) { \
+ x_form(instr_name, src, dst, r0, LeaveRC); \
+ }
+
#define DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM(name, instr_name, instr_value) \
inline void name(const Register dst, const MemOperand& src) { \
x_form(instr_name, src.ra(), dst, src.rb(), SetEH); \
@@ -411,6 +416,7 @@ class Assembler : public AssemblerBase {
PPC_X_OPCODE_D_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_D_FORM)
PPC_X_OPCODE_E_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_E_FORM)
PPC_X_OPCODE_F_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_F_FORM)
+ PPC_X_OPCODE_G_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_G_FORM)
PPC_X_OPCODE_EH_S_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM)
PPC_X_OPCODE_EH_L_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM)
@@ -442,26 +448,40 @@ class Assembler : public AssemblerBase {
#undef DECLARE_PPC_X_INSTRUCTIONS_D_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_E_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_F_FORM
+#undef DECLARE_PPC_X_INSTRUCTIONS_G_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM
-#define DECLARE_PPC_XX2_INSTRUCTIONS(name, instr_name, instr_value) \
- inline void name(const Simd128Register rt, const Simd128Register rb) { \
- xx2_form(instr_name, rt, rb); \
+#define DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register rb) { \
+ xx2_form(instr_name, rt, rb); \
+ }
+#define DECLARE_PPC_XX2_SCALAR_INSTRUCTIONS(name, instr_name, instr_value) \
+ inline void name(const DoubleRegister rt, const DoubleRegister rb) { \
+ xx2_form(instr_name, rt, rb); \
}
- inline void xx2_form(Instr instr, Simd128Register t, Simd128Register b) {
- // Using VR (high VSR) registers.
- int BX = 1;
- int TX = 1;
+ template <typename T>
+ inline void xx2_form(Instr instr, T t, T b) {
+ static_assert(std::is_same<T, Simd128Register>::value ||
+ std::is_same<T, DoubleRegister>::value,
+ "VSX only uses FP or Vector registers.");
+ // Using FP (low VSR) registers.
+ int BX = 0, TX = 0;
+ // Using VR (high VSR) registers when Simd registers are used.
+ if (std::is_same<T, Simd128Register>::value) {
+ BX = TX = 1;
+ }
emit(instr | (t.code() & 0x1F) * B21 | (b.code() & 0x1F) * B11 | BX * B1 |
TX);
}
- PPC_XX2_OPCODE_A_FORM_LIST(DECLARE_PPC_XX2_INSTRUCTIONS)
- PPC_XX2_OPCODE_B_FORM_LIST(DECLARE_PPC_XX2_INSTRUCTIONS)
-#undef DECLARE_PPC_XX2_INSTRUCTIONS
+ PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS)
+ PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(DECLARE_PPC_XX2_SCALAR_INSTRUCTIONS)
+ PPC_XX2_OPCODE_B_FORM_LIST(DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS)
+#undef DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS
+#undef DECLARE_PPC_XX2_SCALAR_INSTRUCTIONS
#define DECLARE_PPC_XX3_VECTOR_INSTRUCTIONS(name, instr_name, instr_value) \
inline void name(const Simd128Register rt, const Simd128Register ra, \
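A standalone sketch (simplified register structs, not the V8 classes) of the compile-time dispatch the new templated xx2_form performs: the BX/TX extension bits are chosen from the register type, so one emitter serves both the scalar FP (low VSR) and vector (high VSR) halves of the VSX register file.

#include <cstdint>
#include <cstdio>
#include <type_traits>

struct DoubleRegister { int code; };   // stand-in for the FP register class
struct Simd128Register { int code; };  // stand-in for the vector register class

template <typename T>
uint32_t EncodeXX2(uint32_t opcode, T t, T b) {
  static_assert(std::is_same<T, Simd128Register>::value ||
                    std::is_same<T, DoubleRegister>::value,
                "VSX only uses FP or Vector registers.");
  // High VSRs (vector registers) set the BX/TX extension bits; FP registers
  // (low VSRs) leave them clear.
  const uint32_t BX = std::is_same<T, Simd128Register>::value ? 1 : 0;
  const uint32_t TX = BX;
  return opcode | (uint32_t(t.code) & 0x1F) << 21 |
         (uint32_t(b.code) & 0x1F) << 11 | BX << 1 | TX;
}

int main() {
  std::printf("%08x\n", EncodeXX2(0xF000042Cu, DoubleRegister{1}, DoubleRegister{2}));
  std::printf("%08x\n", EncodeXX2(0xF000042Cu, Simd128Register{1}, Simd128Register{2}));
  return 0;
}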
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index e7f1ff311d..693f13d43e 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -364,7 +364,7 @@ using Instr = uint32_t;
/* Decimal Floating Test Data Group Quad */ \
V(dtstdgq, DTSTDGQ, 0xFC0001C4)
-#define PPC_XX2_OPCODE_A_FORM_LIST(V) \
+#define PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(V) \
/* VSX Vector Absolute Value Double-Precision */ \
V(xvabsdp, XVABSDP, 0xF0000764) \
/* VSX Vector Negate Double-Precision */ \
@@ -423,6 +423,14 @@ using Instr = uint32_t;
/* Saturate */ \
V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320)
+#define PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(V) \
+ /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \
+ /* signalling */ \
+ V(xscvdpspn, XSCVDPSPN, 0xF000042C) \
+ /* VSX Scalar Convert Single-Precision to Double-Precision format Non- */ \
+ /* signalling */ \
+ V(xscvspdpn, XSCVSPDPN, 0xF000052C)
+
#define PPC_XX2_OPCODE_B_FORM_LIST(V) \
/* Vector Byte-Reverse Quadword */ \
V(xxbrq, XXBRQ, 0xF01F076C)
@@ -440,9 +448,6 @@ using Instr = uint32_t;
V(xsabsdp, XSABSDP, 0xF0000564) \
/* VSX Scalar Convert Double-Precision to Single-Precision */ \
V(xscvdpsp, XSCVDPSP, 0xF0000424) \
- /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \
- /* signalling */ \
- V(xscvdpspn, XSCVDPSPN, 0xF000042C) \
/* VSX Scalar Convert Double-Precision to Signed Fixed-Point Doubleword */ \
/* Saturate */ \
V(xscvdpsxds, XSCVDPSXDS, 0xF0000560) \
@@ -457,9 +462,6 @@ using Instr = uint32_t;
V(xscvdpuxws, XSCVDPUXWS, 0xF0000120) \
/* VSX Scalar Convert Single-Precision to Double-Precision (p=1) */ \
V(xscvspdp, XSCVSPDP, 0xF0000524) \
- /* Scalar Convert Single-Precision to Double-Precision format Non- */ \
- /* signalling */ \
- V(xscvspdpn, XSCVSPDPN, 0xF000052C) \
/* VSX Scalar Convert Signed Fixed-Point Doubleword to Double-Precision */ \
V(xscvsxddp, XSCVSXDDP, 0xF00005E0) \
/* VSX Scalar Convert Signed Fixed-Point Doubleword to Single-Precision */ \
@@ -531,9 +533,10 @@ using Instr = uint32_t;
/* Vector Splat Immediate Byte */ \
V(xxspltib, XXSPLTIB, 0xF00002D0)
-#define PPC_XX2_OPCODE_LIST(V) \
- PPC_XX2_OPCODE_A_FORM_LIST(V) \
- PPC_XX2_OPCODE_B_FORM_LIST(V) \
+#define PPC_XX2_OPCODE_LIST(V) \
+ PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(V) \
+ PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(V) \
+ PPC_XX2_OPCODE_B_FORM_LIST(V) \
PPC_XX2_OPCODE_UNUSED_LIST(V)
#define PPC_EVX_OPCODE_LIST(V) \
@@ -1267,6 +1270,14 @@ using Instr = uint32_t;
/* Compare Logical */ \
V(cmpl, CMPL, 0x7C000040)
+#define PPC_X_OPCODE_G_FORM_LIST(V) \
+ /* Byte-Reverse Halfword */ \
+ V(brh, BRH, 0x7C0001B6) \
+ /* Byte-Reverse Word */ \
+ V(brw, BRW, 0x7C000136) \
+ /* Byte-Reverse Doubleword */ \
+ V(brd, BRD, 0x7C000176)
+
#define PPC_X_OPCODE_EH_S_FORM_LIST(V) \
/* Store Byte Conditional Indexed */ \
V(stbcx, STBCX, 0x7C00056D) \
@@ -1737,6 +1748,7 @@ using Instr = uint32_t;
PPC_X_OPCODE_D_FORM_LIST(V) \
PPC_X_OPCODE_E_FORM_LIST(V) \
PPC_X_OPCODE_F_FORM_LIST(V) \
+ PPC_X_OPCODE_G_FORM_LIST(V) \
PPC_X_OPCODE_EH_L_FORM_LIST(V) \
PPC_X_OPCODE_UNUSED_LIST(V)
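The new G-form list wires up the byte-reverse instructions added in Power ISA 3.1. For reference, a portable sketch of what brh/brw/brd compute:

#include <cstdint>
#include <cstdio>

static uint16_t brh(uint16_t x) {  // byte-reverse halfword
  return static_cast<uint16_t>((x >> 8) | (x << 8));
}
static uint32_t brw(uint32_t x) {  // byte-reverse word
  return (x >> 24) | ((x >> 8) & 0x0000FF00u) | ((x << 8) & 0x00FF0000u) |
         (x << 24);
}
static uint64_t brd(uint64_t x) {  // byte-reverse doubleword
  return (static_cast<uint64_t>(brw(static_cast<uint32_t>(x))) << 32) |
         brw(static_cast<uint32_t>(x >> 32));
}

int main() {
  std::printf("%04x\n", brh(0x1234));       // 3412
  std::printf("%08x\n", brw(0x12345678u));  // 78563412
  // 0807060504030201
  std::printf("%016llx\n",
              static_cast<unsigned long long>(brd(0x0102030405060708ull)));
  return 0;
}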
@@ -3006,7 +3018,8 @@ class Instruction {
}
opcode = extcode | BitField(10, 2);
switch (opcode) {
- PPC_XX2_OPCODE_A_FORM_LIST(OPCODE_CASES)
+ PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(OPCODE_CASES)
+ PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(OPCODE_CASES)
PPC_XX2_OPCODE_UNUSED_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index f243055490..64d94c68eb 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -168,8 +168,6 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
if (cond != al) b(NegateCondition(cond), &skip, cr);
- DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
-
mov(ip, Operand(target, rmode));
mtctr(ip);
bctr();
@@ -1252,6 +1250,9 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
mov(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
}
+#if V8_ENABLE_WEBASSEMBLY
+ if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+#endif // V8_ENABLE_WEBASSEMBLY
}
int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
@@ -2662,7 +2663,14 @@ void TurboAssembler::MovDoubleToInt64(
addi(sp, sp, Operand(kDoubleSize));
}
-void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
+void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src,
+ Register scratch) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
+ ShiftLeftU64(scratch, src, Operand(32));
+ mtfprd(dst, scratch);
+ xscvspdpn(dst, dst);
+ return;
+ }
subi(sp, sp, Operand(kFloatSize));
stw(src, MemOperand(sp, 0));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
@@ -2670,7 +2678,13 @@ void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
addi(sp, sp, Operand(kFloatSize));
}
-void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
+void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src,
+ DoubleRegister scratch) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
+ xscvdpspn(scratch, src);
+ mffprwz(dst, scratch);
+ return;
+ }
subi(sp, sp, Operand(kFloatSize));
stfs(src, MemOperand(sp, 0));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
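What MovIntToFloat/MovFloatToInt provide is a raw 32-bit bit move between an integer and a float register, not a numeric conversion; on Power8+ the patch now does it register-to-register (mtfprd + xscvspdpn, resp. xscvdpspn + mffprwz) instead of bouncing through the stack. A portable sketch of the semantics:

#include <cstdint>
#include <cstdio>
#include <cstring>

static float MovIntToFloat(uint32_t bits) {
  float f;
  std::memcpy(&f, &bits, sizeof f);  // same 32-bit pattern, reinterpreted
  return f;
}

static uint32_t MovFloatToInt(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof bits);
  return bits;
}

int main() {
  std::printf("%08x\n", MovFloatToInt(1.0f));       // 3f800000
  std::printf("%g\n", MovIntToFloat(0x40490fdbu));  // ~3.14159
  return 0;
}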
@@ -2759,6 +2773,44 @@ void TurboAssembler::MulS32(Register dst, Register src, Register value, OEBit s,
extsw(dst, dst, r);
}
+void TurboAssembler::DivS64(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ divd(dst, src, value, s, r);
+}
+
+void TurboAssembler::DivU64(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ divdu(dst, src, value, s, r);
+}
+
+void TurboAssembler::DivS32(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ divw(dst, src, value, s, r);
+ extsw(dst, dst);
+}
+void TurboAssembler::DivU32(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ divwu(dst, src, value, s, r);
+ ZeroExtWord32(dst, dst);
+}
+
+void TurboAssembler::ModS64(Register dst, Register src, Register value) {
+ modsd(dst, src, value);
+}
+
+void TurboAssembler::ModU64(Register dst, Register src, Register value) {
+ modud(dst, src, value);
+}
+
+void TurboAssembler::ModS32(Register dst, Register src, Register value) {
+ modsw(dst, src, value);
+ extsw(dst, dst);
+}
+void TurboAssembler::ModU32(Register dst, Register src, Register value) {
+ moduw(dst, src, value);
+ ZeroExtWord32(dst, dst);
+}
+
void TurboAssembler::AndU64(Register dst, Register src, const Operand& value,
Register scratch, RCBit r) {
if (is_uint16(value.immediate()) && r == SetRC) {
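A small sketch of the width conventions behind the new Div*/Mod* helpers: 32-bit results live in 64-bit registers, so the signed variants are sign-extended (extsw) and the unsigned variants zero-extended (ZeroExtWord32) after the divide or modulo.

#include <cstdint>
#include <cstdio>

static int64_t DivS32(int64_t src, int64_t value) {
  // divw + extsw: 32-bit signed divide, result sign-extended to 64 bits.
  return static_cast<int64_t>(static_cast<int32_t>(src) /
                              static_cast<int32_t>(value));
}

static int64_t ModU32(int64_t src, int64_t value) {
  // moduw + ZeroExtWord32: 32-bit unsigned modulo, result zero-extended.
  return static_cast<uint32_t>(src) % static_cast<uint32_t>(value);
}

int main() {
  std::printf("%lld\n", static_cast<long long>(DivS32(-7, 2)));  // -3
  std::printf("%lld\n", static_cast<long long>(ModU32(-7, 2)));  // 1 (0xFFFFFFF9 % 2)
  return 0;
}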
@@ -3056,7 +3108,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
#define GenerateMemoryOperation(reg, mem, ri_op, rr_op) \
{ \
- int offset = mem.offset(); \
+ int64_t offset = mem.offset(); \
\
if (mem.rb() == no_reg) { \
if (!is_int16(offset)) { \
@@ -3085,7 +3137,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
#define GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op) \
{ \
- int offset = mem.offset(); \
+ int64_t offset = mem.offset(); \
int misaligned = (offset & 3); \
\
if (mem.rb() == no_reg) { \
@@ -3265,7 +3317,7 @@ void TurboAssembler::StoreF64LE(DoubleRegister dst, const MemOperand& mem,
LoadU64(scratch, mem, scratch2);
StoreU64LE(scratch, mem, scratch2);
#else
- LoadF64(dst, mem, scratch);
+ StoreF64(dst, mem, scratch);
#endif
}
@@ -3276,7 +3328,7 @@ void TurboAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem,
LoadU32(scratch, mem, scratch2);
StoreU32LE(scratch, mem, scratch2);
#else
- LoadF64(dst, mem, scratch);
+ StoreF32(dst, mem, scratch);
#endif
}
@@ -3453,10 +3505,6 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
addi(sp, sp, Operand(2 * kSimd128Size));
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- mov(kSpeculationPoisonRegister, Operand(-1));
-}
-
void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
CmpS64(x, Operand(y), r0);
beq(dest);
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index 035c29b1e5..2dfdb39dcc 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -201,6 +201,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch = r0, OEBit s = LeaveOE, RCBit r = LeaveRC);
void MulS32(Register dst, Register src, Register value, OEBit s = LeaveOE,
RCBit r = LeaveRC);
+ void DivS64(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void DivU64(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void DivS32(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void DivU32(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void ModS64(Register dst, Register src, Register value);
+ void ModU64(Register dst, Register src, Register value);
+ void ModS32(Register dst, Register src, Register value);
+ void ModU32(Register dst, Register src, Register value);
void AndU64(Register dst, Register src, const Operand& value,
Register scratch = r0, RCBit r = SetRC);
@@ -561,8 +573,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register dst_hi,
#endif
Register dst, DoubleRegister src);
- void MovIntToFloat(DoubleRegister dst, Register src);
- void MovFloatToInt(Register dst, DoubleRegister src);
+ void MovIntToFloat(DoubleRegister dst, Register src, Register scratch);
+ void MovFloatToInt(Register dst, DoubleRegister src, DoubleRegister scratch);
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value,
@@ -735,8 +747,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
diff --git a/deps/v8/src/codegen/ppc/register-ppc.h b/deps/v8/src/codegen/ppc/register-ppc.h
index ffeb327055..68adfdb155 100644
--- a/deps/v8/src/codegen/ppc/register-ppc.h
+++ b/deps/v8/src/codegen/ppc/register-ppc.h
@@ -349,7 +349,6 @@ constexpr Register kReturnRegister2 = r5;
constexpr Register kJSFunctionRegister = r4;
constexpr Register kContextRegister = r30;
constexpr Register kAllocateSizeRegister = r4;
-constexpr Register kSpeculationPoisonRegister = r14;
constexpr Register kInterpreterAccumulatorRegister = r3;
constexpr Register kInterpreterBytecodeOffsetRegister = r15;
constexpr Register kInterpreterBytecodeArrayRegister = r16;
diff --git a/deps/v8/src/codegen/register-arch.h b/deps/v8/src/codegen/register-arch.h
index eb4cdb8789..d5ea2879da 100644
--- a/deps/v8/src/codegen/register-arch.h
+++ b/deps/v8/src/codegen/register-arch.h
@@ -22,6 +22,8 @@
#include "src/codegen/mips/register-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/register-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/register-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/register-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/codegen/register-configuration.cc b/deps/v8/src/codegen/register-configuration.cc
index aca5295c11..2fc97e2fec 100644
--- a/deps/v8/src/codegen/register-configuration.cc
+++ b/deps/v8/src/codegen/register-configuration.cc
@@ -60,6 +60,8 @@ static int get_num_allocatable_double_registers() {
kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_MIPS64
kMaxAllocatableDoubleRegisterCount;
+#elif V8_TARGET_ARCH_LOONG64
+ kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC
kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC64
@@ -102,42 +104,6 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
GetDefaultRegisterConfiguration)
-// Allocatable registers with the masking register removed.
-class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
- public:
- ArchDefaultPoisoningRegisterConfiguration()
- : RegisterConfiguration(
- Register::kNumRegisters, DoubleRegister::kNumRegisters,
- kMaxAllocatableGeneralRegisterCount - 1,
- get_num_allocatable_double_registers(),
- InitializeGeneralRegisterCodes(), get_allocatable_double_codes(),
- kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE) {
- }
-
- private:
- static const int* InitializeGeneralRegisterCodes() {
- int filtered_index = 0;
- for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
- if (kAllocatableGeneralCodes[i] != kSpeculationPoisonRegister.code()) {
- allocatable_general_codes_[filtered_index] =
- kAllocatableGeneralCodes[i];
- filtered_index++;
- }
- }
- DCHECK_EQ(filtered_index, kMaxAllocatableGeneralRegisterCount - 1);
- return allocatable_general_codes_;
- }
-
- static int
- allocatable_general_codes_[kMaxAllocatableGeneralRegisterCount - 1];
-};
-
-int ArchDefaultPoisoningRegisterConfiguration::allocatable_general_codes_
- [kMaxAllocatableGeneralRegisterCount - 1];
-
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultPoisoningRegisterConfiguration,
- GetDefaultPoisoningRegisterConfiguration)
-
// RestrictedRegisterConfiguration uses the subset of allocatable general
// registers the architecture support, which results into generating assembly
// to use less registers. Currently, it's only used by RecordWrite code stub.
@@ -184,10 +150,6 @@ const RegisterConfiguration* RegisterConfiguration::Default() {
return GetDefaultRegisterConfiguration();
}
-const RegisterConfiguration* RegisterConfiguration::Poisoning() {
- return GetDefaultPoisoningRegisterConfiguration();
-}
-
const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
RegList registers) {
int num = NumRegs(registers);
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index 0693d32459..7c4d85128f 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -320,7 +320,7 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() {
#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \
defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) || \
- defined(V8_TARGET_ARCH_RISCV64)
+ defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
return true;
#endif
}
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
index 0c322542a9..8cad060a47 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -57,6 +57,9 @@ static unsigned CpuFeaturesImpliedByCompiler() {
answer |= 1u << FPU;
#endif // def CAN_USE_FPU_INSTRUCTIONS
+#ifdef CAN_USE_RVV_INSTRUCTIONS
+ answer |= 1u << RISCV_SIMD;
+#endif // def CAN_USE_RVV_INSTRUCTIONS
return answer;
}
@@ -64,18 +67,20 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(RISCV_SIMD); }
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
-
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
-
// Probe for additional features at runtime.
base::CPU cpu;
if (cpu.has_fpu()) supported_ |= 1u << FPU;
+ // Set a static value on whether SIMD is supported.
+  // This variable is only used for certain archs to query SupportsWasmSimd128()
+  // at runtime in builtins using an extern ref. Other callers should use
+  // CpuFeatures::SupportsWasmSimd128().
+ CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}
void CpuFeatures::PrintTarget() {}
void CpuFeatures::PrintFeatures() {}
-
int ToNumber(Register reg) {
DCHECK(reg.is_valid());
const int kNumbers[] = {
@@ -207,7 +212,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
- scratch_register_list_(t3.bit() | t5.bit() | s10.bit()),
+ VU(this),
+ scratch_register_list_(t3.bit() | t5.bit()),
constpool_(this) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
@@ -309,7 +315,6 @@ bool Assembler::IsCBranch(Instr instr) {
int Op = instr & kRvcOpcodeMask;
return Op == RO_C_BNEZ || Op == RO_C_BEQZ;
}
-
bool Assembler::IsJump(Instr instr) {
int Op = instr & kBaseOpcodeMask;
return Op == JAL || Op == JALR;
@@ -377,7 +382,7 @@ int Assembler::target_at(int pos, bool is_internal) {
} else {
return pos + imm13;
}
- } break;
+ }
case JAL: {
int32_t imm21 = JumpOffset(instr);
if (imm21 == kEndOfJumpChain) {
@@ -386,7 +391,7 @@ int Assembler::target_at(int pos, bool is_internal) {
} else {
return pos + imm21;
}
- } break;
+ }
case JALR: {
int32_t imm12 = instr >> 20;
if (imm12 == kEndOfJumpChain) {
@@ -395,7 +400,7 @@ int Assembler::target_at(int pos, bool is_internal) {
} else {
return pos + imm12;
}
- } break;
+ }
case LUI: {
Address pc = reinterpret_cast<Address>(buffer_start_ + pos);
pc = target_address_at(pc);
@@ -409,7 +414,7 @@ int Assembler::target_at(int pos, bool is_internal) {
DCHECK(pos > delta);
return pos - delta;
}
- } break;
+ }
case AUIPC: {
Instr instr_auipc = instr;
Instr instr_I = instr_at(pos + 4);
@@ -417,18 +422,18 @@ int Assembler::target_at(int pos, bool is_internal) {
int32_t offset = BrachlongOffset(instr_auipc, instr_I);
if (offset == kEndOfJumpChain) return kEndOfChain;
return offset + pos;
- } break;
+ }
case RO_C_J: {
int32_t offset = instruction->RvcImm11CJValue();
if (offset == kEndOfJumpChain) return kEndOfChain;
return offset + pos;
- } break;
+ }
case RO_C_BNEZ:
case RO_C_BEQZ: {
int32_t offset = instruction->RvcImm8BValue();
if (offset == kEndOfJumpChain) return kEndOfChain;
return pos + offset;
- } break;
+ }
default: {
if (instr == kEndOfJumpChain) {
return kEndOfChain;
@@ -437,7 +442,7 @@ int Assembler::target_at(int pos, bool is_internal) {
((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
return (imm18 + pos);
}
- } break;
+ }
}
}
@@ -511,7 +516,6 @@ static inline ShortInstr SetCJalOffset(int32_t pos, int32_t target_pos,
DCHECK(Assembler::IsCJal(instr | (imm11 & kImm11Mask)));
return instr | (imm11 & kImm11Mask);
}
-
static inline Instr SetCBranchOffset(int32_t pos, int32_t target_pos,
Instr instr) {
DCHECK(Assembler::IsCBranch(instr));
@@ -1137,6 +1141,102 @@ void Assembler::GenInstrCBA(uint8_t funct3, uint8_t funct2, Opcode opcode,
emit(instr);
}
+// OPIVV OPFVV OPMVV
+void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
+ VRegister vs1, VRegister vs2, MaskType mask) {
+ DCHECK(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+// OPMVV OPFVV
+void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, Register rd,
+ VRegister vs1, VRegister vs2, MaskType mask) {
+ DCHECK(opcode == OP_MVV || opcode == OP_FVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPIVX OPFVF OPMVX
+void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
+ Register rs1, VRegister vs2, MaskType mask) {
+ DCHECK(opcode == OP_IVX || opcode == OP_FVF || opcode == OP_MVX);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPMVX
+void Assembler::GenInstrV(uint8_t funct6, Register rd, Register rs1,
+ VRegister vs2, MaskType mask) {
+ Instr instr = (funct6 << kRvvFunct6Shift) | OP_MVX | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+// OPIVI
+void Assembler::GenInstrV(uint8_t funct6, VRegister vd, int8_t imm5,
+ VRegister vs2, MaskType mask) {
+ DCHECK(is_uint5(imm5) || is_int5(imm5));
+ Instr instr = (funct6 << kRvvFunct6Shift) | OP_IVI | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ (((uint32_t)imm5 << kRvvImm5Shift) & kRvvImm5Mask) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// VL VS
+void Assembler::GenInstrV(Opcode opcode, uint8_t width, VRegister vd,
+ Register rs1, uint8_t umop, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ DCHECK(opcode == LOAD_FP || opcode == STORE_FP);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((umop << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+void Assembler::GenInstrV(Opcode opcode, uint8_t width, VRegister vd,
+ Register rs1, Register rs2, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ DCHECK(opcode == LOAD_FP || opcode == STORE_FP);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((rs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+// VL VS AMO
+void Assembler::GenInstrV(Opcode opcode, uint8_t width, VRegister vd,
+ Register rs1, VRegister vs2, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ DCHECK(opcode == LOAD_FP || opcode == STORE_FP || opcode == AMO);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((vs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
// ----- Instruction class templates match those in the compiler
void Assembler::GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,
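For orientation, a standalone sketch of the OP-V instruction layout these GenInstrV overloads pack, following the field positions in the RISC-V "V" extension (the shift constants below are local to the sketch, not V8's kRvv* values):

#include <cstdint>
#include <cstdio>

constexpr uint32_t kOpcodeOPV = 0x57;  // major opcode OP-V
constexpr int kVdShift = 7, kFunct3Shift = 12, kVs1Shift = 15, kVs2Shift = 20,
              kVmShift = 25, kFunct6Shift = 26;
constexpr uint32_t kFunct3OPIVV = 0b000;  // integer vector-vector form

static uint32_t EncodeOPIVV(uint32_t funct6, int vd, int vs1, int vs2, int vm) {
  return kOpcodeOPV | (vd & 0x1F) << kVdShift | kFunct3OPIVV << kFunct3Shift |
         (vs1 & 0x1F) << kVs1Shift | (vs2 & 0x1F) << kVs2Shift |
         (vm & 0x1) << kVmShift | funct6 << kFunct6Shift;
}

int main() {
  // vadd.vv v1, v2, v3 (funct6 = 0b000000), unmasked (vm = 1)
  std::printf("%08x\n", EncodeOPIVV(0b000000, /*vd=*/1, /*vs1=*/3, /*vs2=*/2, 1));
  return 0;
}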
@@ -2328,8 +2428,538 @@ void Assembler::EBREAK() {
ebreak();
}
-// Privileged
+// RVV
+void Assembler::vmv_vv(VRegister vd, VRegister vs1) {
+ GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, v0, NoMask);
+}
+
+void Assembler::vmv_vx(VRegister vd, Register rs1) {
+ GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, v0, NoMask);
+}
+
+void Assembler::vmv_vi(VRegister vd, uint8_t simm5) {
+ GenInstrV(VMV_FUNCT6, vd, simm5, v0, NoMask);
+}
+
+void Assembler::vmv_xs(Register rd, VRegister vs2) {
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, v0, vs2, NoMask);
+}
+
+void Assembler::vmv_sx(VRegister vd, Register rs1) {
+ GenInstrV(VRXUNARY0_FUNCT6, OP_MVX, vd, rs1, v0, NoMask);
+}
+
+void Assembler::vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void Assembler::vmerge_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void Assembler::vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void Assembler::vadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void Assembler::vadc_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void Assembler::vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void Assembler::vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void Assembler::vmadc_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void Assembler::vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, vd, imm5, vs2, Mask);
+}
+#define DEFINE_OPIVV(name, funct6) \
+ void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_IVV, vd, vs1, vs2, mask); \
+ }
+
+#define DEFINE_OPIVX(name, funct6) \
+ void Assembler::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_IVX, vd, rs1, vs2, mask); \
+ }
+
+#define DEFINE_OPIVI(name, funct6) \
+ void Assembler::name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
+ MaskType mask) { \
+ GenInstrV(funct6, vd, imm5, vs2, mask); \
+ }
+
+#define DEFINE_OPMVV(name, funct6) \
+ void Assembler::name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask); \
+ }
+
+DEFINE_OPIVV(vadd, VADD_FUNCT6)
+DEFINE_OPIVX(vadd, VADD_FUNCT6)
+DEFINE_OPIVI(vadd, VADD_FUNCT6)
+DEFINE_OPIVV(vsub, VSUB_FUNCT6)
+DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVX(vsaddu, VSADD_FUNCT6)
+DEFINE_OPIVV(vsaddu, VSADD_FUNCT6)
+DEFINE_OPIVI(vsaddu, VSADD_FUNCT6)
+DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
+DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
+DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
+DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
+DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
+DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
+DEFINE_OPIVV(vminu, VMINU_FUNCT6)
+DEFINE_OPIVX(vminu, VMINU_FUNCT6)
+DEFINE_OPIVV(vmin, VMIN_FUNCT6)
+DEFINE_OPIVX(vmin, VMIN_FUNCT6)
+DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
+DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
+DEFINE_OPIVV(vmax, VMAX_FUNCT6)
+DEFINE_OPIVX(vmax, VMAX_FUNCT6)
+DEFINE_OPIVV(vand, VAND_FUNCT6)
+DEFINE_OPIVX(vand, VAND_FUNCT6)
+DEFINE_OPIVI(vand, VAND_FUNCT6)
+DEFINE_OPIVV(vor, VOR_FUNCT6)
+DEFINE_OPIVX(vor, VOR_FUNCT6)
+DEFINE_OPIVI(vor, VOR_FUNCT6)
+DEFINE_OPIVV(vxor, VXOR_FUNCT6)
+DEFINE_OPIVX(vxor, VXOR_FUNCT6)
+DEFINE_OPIVI(vxor, VXOR_FUNCT6)
+DEFINE_OPIVV(vrgather, VRGATHER_FUNCT6)
+DEFINE_OPIVX(vrgather, VRGATHER_FUNCT6)
+DEFINE_OPIVI(vrgather, VRGATHER_FUNCT6)
+
+DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
+DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
+DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
+DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)
+
+DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
+DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
+DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)
+
+DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
+DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
+DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)
+
+DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
+DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)
+
+DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
+DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)
+
+DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
+DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
+DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)
+
+DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
+DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
+DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)
+
+DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
+DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)
+
+DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
+DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)
+
+DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
+DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
+DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+
+DEFINE_OPIVV(vsll, VSLL_FUNCT6)
+DEFINE_OPIVX(vsll, VSLL_FUNCT6)
+DEFINE_OPIVI(vsll, VSLL_FUNCT6)
+
+DEFINE_OPMVV(vredmaxu, VREDMAXU_FUNCT6)
+DEFINE_OPMVV(vredmax, VREDMAX_FUNCT6)
+DEFINE_OPMVV(vredmin, VREDMIN_FUNCT6)
+DEFINE_OPMVV(vredminu, VREDMINU_FUNCT6)
+#undef DEFINE_OPIVI
+#undef DEFINE_OPIVV
+#undef DEFINE_OPIVX
+
+void Assembler::vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail, MaskAgnosticType mask) {
+ int32_t zimm = GenZimm(vsew, vlmul, tail, mask);
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x0 << 31;
+ emit(instr);
+}
+
+void Assembler::vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail, MaskAgnosticType mask) {
+ DCHECK(is_uint5(uimm));
+ int32_t zimm = GenZimm(vsew, vlmul, tail, mask) & 0x3FF;
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((uimm & 0x1F) << kRvvUimmShift) |
+ (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x3 << 30;
+ emit(instr);
+}
+
+void Assembler::vsetvl(Register rd, Register rs1, Register rs2) {
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((rs2.code() & 0x1F) << kRvvRs2Shift) | 0x40 << 25;
+ emit(instr);
+}
+
+uint8_t vsew_switch(VSew vsew) {
+ uint8_t width;
+ switch (vsew) {
+ case E8:
+ width = 0b000;
+ break;
+ case E16:
+ width = 0b101;
+ break;
+ case E32:
+ width = 0b110;
+ break;
+ case E64:
+ width = 0b111;
+ break;
+ case E128:
+ width = 0b000;
+ break;
+ case E256:
+ width = 0b101;
+ break;
+ case E512:
+ width = 0b110;
+ break;
+ case E1024:
+ width = 0b111;
+ break;
+ }
+ return width;
+}
+
+void Assembler::vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b000);
+}
+void Assembler::vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b000);
+}
+void Assembler::vlx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, vs2, mask, 0b11, IsMew, 0);
+}
+
+void Assembler::vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b000);
+}
+void Assembler::vss(VRegister vs3, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vs3, rs1, rs2, mask, 0b10, IsMew, 0b000);
+}
+
+void Assembler::vsx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b11, IsMew, 0b000);
+}
+void Assembler::vsu(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b01, IsMew, 0b000);
+}
+
+void Assembler::vlseg2(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b001);
+}
+
+void Assembler::vlseg3(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b010);
+}
+
+void Assembler::vlseg4(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b011);
+}
+
+void Assembler::vlseg5(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b100);
+}
+
+void Assembler::vlseg6(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b101);
+}
+
+void Assembler::vlseg7(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b110);
+}
+
+void Assembler::vlseg8(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b111);
+}
+void Assembler::vsseg2(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b001);
+}
+void Assembler::vsseg3(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b010);
+}
+void Assembler::vsseg4(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b011);
+}
+void Assembler::vsseg5(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b100);
+}
+void Assembler::vsseg6(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b101);
+}
+void Assembler::vsseg7(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b110);
+}
+void Assembler::vsseg8(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b111);
+}
+
+void Assembler::vlsseg2(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b001);
+}
+void Assembler::vlsseg3(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b010);
+}
+void Assembler::vlsseg4(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b011);
+}
+void Assembler::vlsseg5(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b100);
+}
+void Assembler::vlsseg6(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b101);
+}
+void Assembler::vlsseg7(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b110);
+}
+void Assembler::vlsseg8(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b111);
+}
+void Assembler::vssseg2(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b001);
+}
+void Assembler::vssseg3(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b010);
+}
+void Assembler::vssseg4(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b011);
+}
+void Assembler::vssseg5(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b100);
+}
+void Assembler::vssseg6(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b101);
+}
+void Assembler::vssseg7(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b110);
+}
+void Assembler::vssseg8(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b111);
+}
+
+void Assembler::vlxseg2(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b001);
+}
+void Assembler::vlxseg3(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b010);
+}
+void Assembler::vlxseg4(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b011);
+}
+void Assembler::vlxseg5(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b100);
+}
+void Assembler::vlxseg6(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b101);
+}
+void Assembler::vlxseg7(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b110);
+}
+void Assembler::vlxseg8(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b111);
+}
+void Assembler::vsxseg2(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b001);
+}
+void Assembler::vsxseg3(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b010);
+}
+void Assembler::vsxseg4(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b011);
+}
+void Assembler::vsxseg5(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b100);
+}
+void Assembler::vsxseg6(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b101);
+}
+void Assembler::vsxseg7(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b110);
+}
+void Assembler::vsxseg8(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b111);
+}
+
+// Privileged
void Assembler::uret() {
GenInstrPriv(0b0000000, ToRegister(0), ToRegister(0b00010));
}
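The zimm carried by vsetvli/vsetivli is the vtype word that GenZimm assembles: vlmul in bits [2:0], vsew in bits [5:3], then the tail- and mask-agnostic bits. A simplified sketch with abbreviated enums:

#include <cstdint>
#include <cstdio>

enum VSew { E8 = 0, E16, E32, E64 };
enum Vlmul { m1 = 0, m2, m4, m8, mf8 = 5, mf4, mf2 };

// vtype layout: vlmul[2:0] | vsew[5:3] | vta[6] | vma[7]
static int32_t GenZimm(VSew vsew, Vlmul vlmul, bool tail_agnostic,
                       bool mask_agnostic) {
  return (mask_agnostic << 7) | (tail_agnostic << 6) | ((vsew & 0x7) << 3) |
         (vlmul & 0x7);
}

int main() {
  // SEW=32, LMUL=1, tail- and mask-agnostic -> 0xd0
  std::printf("0x%02x\n", GenZimm(E32, m1, true, true));
  return 0;
}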
@@ -2723,8 +3353,6 @@ void Assembler::AdjustBaseAndOffset(MemOperand* src, Register scratch,
// for a load/store when the offset doesn't fit into int12.
// Must not overwrite the register 'base' while loading 'offset'.
- DCHECK(src->rm() != scratch);
-
constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7F8;
constexpr int32_t kMaxOffsetForSimpleAdjustment =
2 * kMinOffsetForSimpleAdjustment;
@@ -2766,7 +3394,6 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
return 8; // Number of instructions patched.
} else {
UNIMPLEMENTED();
- return 1;
}
}
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.h b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
index 88e403d366..7da77f8e0e 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
@@ -666,6 +666,207 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void NOP();
void EBREAK();
+ // RVV
+ static int32_t GenZimm(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ return (mask << 7) | (tail << 6) | ((vsew & 0x7) << 3) | (vlmul & 0x7);
+ }
+
+ void vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
+
+ void vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
+
+ inline void vsetvlmax(Register rd, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ vsetvli(rd, zero_reg, vsew, vlmul, tu, mu);
+ }
+
+ inline void vsetvl(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ vsetvli(zero_reg, zero_reg, vsew, vlmul, tu, mu);
+ }
+
+ void vsetvl(Register rd, Register rs1, Register rs2);
+
+ void vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask = NoMask);
+ void vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask = NoMask);
+ void vlx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+ void vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask = NoMask);
+ void vss(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask = NoMask);
+ void vsx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+ void vsu(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+#define SegInstr(OP) \
+ void OP##seg2(ARG); \
+ void OP##seg3(ARG); \
+ void OP##seg4(ARG); \
+ void OP##seg5(ARG); \
+ void OP##seg6(ARG); \
+ void OP##seg7(ARG); \
+ void OP##seg8(ARG);
+
+#define ARG \
+ VRegister vd, Register rs1, uint8_t lumop, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vl) SegInstr(vs)
+#undef ARG
+
+#define ARG \
+ VRegister vd, Register rs1, Register rs2, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vls) SegInstr(vss)
+#undef ARG
+
+#define ARG \
+ VRegister vd, Register rs1, VRegister rs2, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vsx) SegInstr(vlx)
+#undef ARG
+#undef SegInstr
+
+ // RVV Vector Arithmetic Instruction
+
+ void vmv_vv(VRegister vd, VRegister vs1);
+ void vmv_vx(VRegister vd, Register rs1);
+ void vmv_vi(VRegister vd, uint8_t simm5);
+ void vmv_xs(Register rd, VRegister vs2);
+ void vmv_sx(VRegister vd, Register rs1);
+ void vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vmerge_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vadc_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vmadc_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+#define DEFINE_OPIVV(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPIVX(name, funct6) \
+ void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPIVI(name, funct6) \
+ void name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPMVV(name, funct6) \
+ void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPMVX(name, funct6) \
+ void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask = NoMask);
+
+ DEFINE_OPIVV(vadd, VADD_FUNCT6)
+ DEFINE_OPIVX(vadd, VADD_FUNCT6)
+ DEFINE_OPIVI(vadd, VADD_FUNCT6)
+ DEFINE_OPIVV(vsub, VSUB_FUNCT6)
+ DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+ DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVX(vsaddu, VSADD_FUNCT6)
+ DEFINE_OPIVV(vsaddu, VSADD_FUNCT6)
+ DEFINE_OPIVI(vsaddu, VSADD_FUNCT6)
+ DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
+ DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
+ DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
+ DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
+ DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
+ DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
+ DEFINE_OPIVV(vminu, VMINU_FUNCT6)
+ DEFINE_OPIVX(vminu, VMINU_FUNCT6)
+ DEFINE_OPIVV(vmin, VMIN_FUNCT6)
+ DEFINE_OPIVX(vmin, VMIN_FUNCT6)
+ DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
+ DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
+ DEFINE_OPIVV(vmax, VMAX_FUNCT6)
+ DEFINE_OPIVX(vmax, VMAX_FUNCT6)
+ DEFINE_OPIVV(vand, VAND_FUNCT6)
+ DEFINE_OPIVX(vand, VAND_FUNCT6)
+ DEFINE_OPIVI(vand, VAND_FUNCT6)
+ DEFINE_OPIVV(vor, VOR_FUNCT6)
+ DEFINE_OPIVX(vor, VOR_FUNCT6)
+ DEFINE_OPIVI(vor, VOR_FUNCT6)
+ DEFINE_OPIVV(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVX(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVI(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVV(vrgather, VRGATHER_FUNCT6)
+ DEFINE_OPIVX(vrgather, VRGATHER_FUNCT6)
+ DEFINE_OPIVI(vrgather, VRGATHER_FUNCT6)
+
+ DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
+ DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
+ DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
+ DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)
+
+ DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
+ DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
+ DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)
+
+ DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
+ DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
+ DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)
+
+ DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
+ DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)
+
+ DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
+ DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)
+
+ DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
+ DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
+ DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)
+
+ DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
+ DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
+ DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)
+
+ DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
+ DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)
+
+ DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
+ DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)
+
+ DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
+ DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
+ DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+
+ DEFINE_OPIVV(vsll, VSLL_FUNCT6)
+ DEFINE_OPIVX(vsll, VSLL_FUNCT6)
+ DEFINE_OPIVI(vsll, VSLL_FUNCT6)
+
+ DEFINE_OPMVV(vredmaxu, VREDMAXU_FUNCT6)
+ DEFINE_OPMVV(vredmax, VREDMAX_FUNCT6)
+ DEFINE_OPMVV(vredmin, VREDMIN_FUNCT6)
+ DEFINE_OPMVV(vredminu, VREDMINU_FUNCT6)
+#undef DEFINE_OPIVI
+#undef DEFINE_OPIVV
+#undef DEFINE_OPIVX
+#undef DEFINE_OPMVV
+#undef DEFINE_OPMVX
+
+ void vnot_vv(VRegister dst, VRegister src) { vxor_vi(dst, src, -1); }
+
+ void vneg_vv(VRegister dst, VRegister src) { vrsub_vx(dst, src, zero_reg); }
// Privileged
void uret();
void sret();
@@ -942,6 +1143,55 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
constpool_.RecordEntry(data, rmode);
}
+ class VectorUnit {
+ public:
+    inline int32_t sew() const { return 0x1 << (sew_ + 3); }  // 8/16/32/64 bits
+
+ inline int32_t vlmax() const {
+ if ((lmul_ & 0b100) != 0) {
+ return (kRvvVLEN / sew()) >> (lmul_ & 0b11);
+ } else {
+ return ((kRvvVLEN << lmul_) / sew());
+ }
+ }
+
+ explicit VectorUnit(Assembler* assm) : assm_(assm) {}
+
+ void set(Register rd, VSew sew, Vlmul lmul) {
+ if (sew != sew_ || lmul != lmul_ || vl != vlmax()) {
+ sew_ = sew;
+ lmul_ = lmul;
+ vl = vlmax();
+ assm_->vsetvlmax(rd, sew_, lmul_);
+ }
+ }
+
+ void set(Register rd, Register rs1, VSew sew, Vlmul lmul) {
+ if (sew != sew_ || lmul != lmul_) {
+ sew_ = sew;
+ lmul_ = lmul;
+ vl = 0;
+ assm_->vsetvli(rd, rs1, sew_, lmul_);
+ }
+ }
+
+ void set(VSew sew, Vlmul lmul) {
+ if (sew != sew_ || lmul != lmul_) {
+ sew_ = sew;
+ lmul_ = lmul;
+ assm_->vsetvl(sew_, lmul_);
+ }
+ }
+
+ private:
+ VSew sew_ = E8;
+ Vlmul lmul_ = m1;
+ int32_t vl = 0;
+ Assembler* assm_;
+ };
+
+ VectorUnit VU;
+
protected:
// Readable constants for base and offset adjustment helper, these indicate if
// aside from offset, another value like offset + 4 should fit into int16.
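The vl value VectorUnit caches is VLMAX, i.e. VLEN / SEW scaled by LMUL (fractional LMULs divide). A sketch assuming a 128-bit vector register, the width kRvvVLEN is expected to describe for this port:

#include <cstdio>

constexpr int kRvvVLEN = 128;  // assumed vector register width in bits

// VLMAX = (VLEN / SEW) * LMUL, with LMUL expressed as a fraction num/den.
static int Vlmax(int sew_bits, int lmul_num, int lmul_den) {
  return kRvvVLEN / sew_bits * lmul_num / lmul_den;
}

int main() {
  std::printf("%d\n", Vlmax(32, 1, 1));  // e32, m1  -> 4 elements
  std::printf("%d\n", Vlmax(8, 8, 1));   // e8,  m8  -> 128 elements
  std::printf("%d\n", Vlmax(64, 1, 2));  // e64, mf2 -> 1 element
  return 0;
}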
@@ -1192,6 +1442,42 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
FPURegister rs1, FPURegister rs2);
+ // ----------------------------RVV------------------------------------------
+ // vsetvl
+ void GenInstrV(Register rd, Register rs1, Register rs2);
+ // vsetvli
+ void GenInstrV(Register rd, Register rs1, uint32_t zimm);
+ // OPIVV OPFVV OPMVV
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, VRegister vs1,
+ VRegister vs2, MaskType mask = NoMask);
+ // OPMVV OPFVV
+ void GenInstrV(uint8_t funct6, Opcode opcode, Register rd, VRegister vs1,
+ VRegister vs2, MaskType mask = NoMask);
+
+ // OPIVX OPFVF OPMVX
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, Register rs1,
+ VRegister vs2, MaskType mask = NoMask);
+
+ // OPMVX
+ void GenInstrV(uint8_t funct6, Register rd, Register rs1, VRegister vs2,
+ MaskType mask = NoMask);
+ // OPIVI
+ void GenInstrV(uint8_t funct6, VRegister vd, int8_t simm5, VRegister vs2,
+ MaskType mask = NoMask);
+
+ // VL VS
+ void GenInstrV(Opcode opcode, uint8_t width, VRegister vd, Register rs1,
+ uint8_t umop, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+
+ void GenInstrV(Opcode opcode, uint8_t width, VRegister vd, Register rs1,
+ Register rs2, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+ // VL VS AMO
+ void GenInstrV(Opcode opcode, uint8_t width, VRegister vd, Register rs1,
+ VRegister vs2, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+
// Labels.
void print(const Label* L);
void bind_to(Label* L, int pos);
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.cc b/deps/v8/src/codegen/riscv64/constants-riscv64.cc
index d2709dc2c7..655a97c12f 100644
--- a/deps/v8/src/codegen/riscv64/constants-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.cc
@@ -105,6 +105,45 @@ int FPURegisters::Number(const char* name) {
return kInvalidFPURegister;
}
+const char* VRegisters::names_[kNumVRegisters] = {
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
+ "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21",
+ "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
+
+const VRegisters::RegisterAlias VRegisters::aliases_[] = {
+ {kInvalidRegister, nullptr}};
+
+const char* VRegisters::Name(int creg) {
+ const char* result;
+ if ((0 <= creg) && (creg < kNumVRegisters)) {
+ result = names_[creg];
+ } else {
+ result = "nocreg";
+ }
+ return result;
+}
+
+int VRegisters::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumVRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].creg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].creg;
+ }
+ i++;
+ }
+
+ // No VRegister with the requested name found.
+ return kInvalidVRegister;
+}
+
InstructionBase::Type InstructionBase::InstructionType() const {
if (IsIllegalInstruction()) {
return kUnsupported;
@@ -193,6 +232,8 @@ InstructionBase::Type InstructionBase::InstructionType() const {
return kJType;
case SYSTEM:
return kIType;
+ case OP_V:
+ return kVType;
}
}
return kUnsupported;
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.h b/deps/v8/src/codegen/riscv64/constants-riscv64.h
index c9cb7687fd..934b962955 100644
--- a/deps/v8/src/codegen/riscv64/constants-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.h
@@ -12,14 +12,15 @@
// UNIMPLEMENTED_ macro for RISCV.
#ifdef DEBUG
-#define UNIMPLEMENTED_RISCV() \
- v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
- __FILE__, __LINE__, __func__)
+#define UNIMPLEMENTED_RISCV() \
+ v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
+ __FILE__, __LINE__, __func__);
#else
#define UNIMPLEMENTED_RISCV()
#endif
-#define UNSUPPORTED_RISCV() v8::internal::PrintF("Unsupported instruction.\n")
+#define UNSUPPORTED_RISCV() \
+ v8::internal::PrintF("Unsupported instruction %d.\n", __LINE__)
enum Endianness { kLittle, kBig };
@@ -75,6 +76,9 @@ const int kPCRegister = 34;
const int kNumFPURegisters = 32;
const int kInvalidFPURegister = -1;
+// Number of vector registers.
+const int kNumVRegisters = 32;
+const int kInvalidVRegister = -1;
// 'pref' instruction hints
const int32_t kPrefHintLoad = 0;
const int32_t kPrefHintStore = 1;
@@ -131,6 +135,24 @@ class FPURegisters {
static const RegisterAlias aliases_[];
};
+class VRegisters {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int creg;
+ const char* name;
+ };
+
+ private:
+ static const char* names_[kNumVRegisters];
+ static const RegisterAlias aliases_[];
+};
+
// -----------------------------------------------------------------------------
// Instructions encoding constants.
@@ -170,6 +192,12 @@ const int kFunct2Shift = 25;
const int kFunct2Bits = 2;
const int kRs1Shift = 15;
const int kRs1Bits = 5;
+const int kVs1Shift = 15;
+const int kVs1Bits = 5;
+const int kVs2Shift = 20;
+const int kVs2Bits = 5;
+const int kVdShift = 7;
+const int kVdBits = 5;
const int kRs2Shift = 20;
const int kRs2Bits = 5;
const int kRs3Shift = 27;
@@ -215,6 +243,71 @@ const int kRvcFunct2Bits = 2;
const int kRvcFunct6Shift = 10;
const int kRvcFunct6Bits = 6;
+// for RVV extension
+constexpr int kRvvELEN = 64;
+constexpr int kRvvVLEN = 128;
+constexpr int kRvvSLEN = kRvvVLEN;
+const int kRvvFunct6Shift = 26;
+const int kRvvFunct6Bits = 6;
+const uint32_t kRvvFunct6Mask =
+ (((1 << kRvvFunct6Bits) - 1) << kRvvFunct6Shift);
+
+const int kRvvVmBits = 1;
+const int kRvvVmShift = 25;
+const uint32_t kRvvVmMask = (((1 << kRvvVmBits) - 1) << kRvvVmShift);
+
+const int kRvvVs2Bits = 5;
+const int kRvvVs2Shift = 20;
+const uint32_t kRvvVs2Mask = (((1 << kRvvVs2Bits) - 1) << kRvvVs2Shift);
+
+const int kRvvVs1Bits = 5;
+const int kRvvVs1Shift = 15;
+const uint32_t kRvvVs1Mask = (((1 << kRvvVs1Bits) - 1) << kRvvVs1Shift);
+
+const int kRvvRs1Bits = kRvvVs1Bits;
+const int kRvvRs1Shift = kRvvVs1Shift;
+const uint32_t kRvvRs1Mask = (((1 << kRvvRs1Bits) - 1) << kRvvRs1Shift);
+
+const int kRvvRs2Bits = 5;
+const int kRvvRs2Shift = 20;
+const uint32_t kRvvRs2Mask = (((1 << kRvvRs2Bits) - 1) << kRvvRs2Shift);
+
+const int kRvvImm5Bits = kRvvVs1Bits;
+const int kRvvImm5Shift = kRvvVs1Shift;
+const uint32_t kRvvImm5Mask = (((1 << kRvvImm5Bits) - 1) << kRvvImm5Shift);
+
+const int kRvvVdBits = 5;
+const int kRvvVdShift = 7;
+const uint32_t kRvvVdMask = (((1 << kRvvVdBits) - 1) << kRvvVdShift);
+
+const int kRvvRdBits = kRvvVdBits;
+const int kRvvRdShift = kRvvVdShift;
+const uint32_t kRvvRdMask = (((1 << kRvvRdBits) - 1) << kRvvRdShift);
+
+const int kRvvZimmBits = 11;
+const int kRvvZimmShift = 20;
+const uint32_t kRvvZimmMask = (((1 << kRvvZimmBits) - 1) << kRvvZimmShift);
+
+const int kRvvUimmShift = kRvvRs1Shift;
+const int kRvvUimmBits = kRvvRs1Bits;
+const uint32_t kRvvUimmMask = (((1 << kRvvUimmBits) - 1) << kRvvUimmShift);
+
+const int kRvvWidthBits = 3;
+const int kRvvWidthShift = 12;
+const uint32_t kRvvWidthMask = (((1 << kRvvWidthBits) - 1) << kRvvWidthShift);
+
+const int kRvvMopBits = 2;
+const int kRvvMopShift = 26;
+const uint32_t kRvvMopMask = (((1 << kRvvMopBits) - 1) << kRvvMopShift);
+
+const int kRvvMewBits = 1;
+const int kRvvMewShift = 28;
+const uint32_t kRvvMewMask = (((1 << kRvvMewBits) - 1) << kRvvMewShift);
+
+const int kRvvNfBits = 3;
+const int kRvvNfShift = 29;
+const uint32_t kRvvNfMask = (((1 << kRvvNfBits) - 1) << kRvvNfShift);
+
// RISCV Instruction bit masks
const uint32_t kBaseOpcodeMask = ((1 << kBaseOpcodeBits) - 1)
<< kBaseOpcodeShift;
@@ -231,6 +324,7 @@ const uint32_t kSTypeMask = kBaseOpcodeMask | kFunct3Mask;
const uint32_t kBTypeMask = kBaseOpcodeMask | kFunct3Mask;
const uint32_t kUTypeMask = kBaseOpcodeMask;
const uint32_t kJTypeMask = kBaseOpcodeMask;
+const uint32_t kVTypeMask = kRvvFunct6Mask | kFunct3Mask | kBaseOpcodeMask;
const uint32_t kRs1FieldMask = ((1 << kRs1Bits) - 1) << kRs1Shift;
const uint32_t kRs2FieldMask = ((1 << kRs2Bits) - 1) << kRs2Shift;
const uint32_t kRs3FieldMask = ((1 << kRs3Bits) - 1) << kRs3Shift;
@@ -535,6 +629,235 @@ enum Opcode : uint32_t {
RO_C_FSDSP = C2 | (0b101 << kRvcFunct3Shift),
RO_C_SWSP = C2 | (0b110 << kRvcFunct3Shift),
RO_C_SDSP = C2 | (0b111 << kRvcFunct3Shift),
+
+ // RVV Extension
+ OP_V = 0b1010111,
+ OP_IVV = OP_V | (0b000 << kFunct3Shift),
+ OP_FVV = OP_V | (0b001 << kFunct3Shift),
+ OP_MVV = OP_V | (0b010 << kFunct3Shift),
+ OP_IVI = OP_V | (0b011 << kFunct3Shift),
+ OP_IVX = OP_V | (0b100 << kFunct3Shift),
+ OP_FVF = OP_V | (0b101 << kFunct3Shift),
+ OP_MVX = OP_V | (0b110 << kFunct3Shift),
+
+ RO_V_VSETVLI = OP_V | (0b111 << kFunct3Shift) | 0b0 << 31,
+ RO_V_VSETIVLI = OP_V | (0b111 << kFunct3Shift) | 0b11 << 30,
+ RO_V_VSETVL = OP_V | (0b111 << kFunct3Shift) | 0b1 << 31,
+
+ // RVV LOAD/STORE
+ RO_V_VL = LOAD_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VLS = LOAD_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VLX = LOAD_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift),
+
+ RO_V_VS = STORE_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSS = STORE_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSX = STORE_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSU = STORE_FP | (0b01 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ // Note: for vector loads/stores the bits at kRvvFunct6Shift encode mop.
+ RO_V_VLSEG2 = LOAD_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLSEG3 = LOAD_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLSEG4 = LOAD_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLSEG5 = LOAD_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLSEG6 = LOAD_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLSEG7 = LOAD_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLSEG8 = LOAD_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSSEG2 = STORE_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSSEG3 = STORE_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSSEG4 = STORE_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSSEG5 = STORE_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSSEG6 = STORE_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSSEG7 = STORE_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSSEG8 = STORE_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VLSSEG2 = LOAD_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLSSEG3 = LOAD_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLSSEG4 = LOAD_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLSSEG5 = LOAD_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLSSEG6 = LOAD_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLSSEG7 = LOAD_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLSSEG8 = LOAD_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSSSEG2 = STORE_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSSSEG3 = STORE_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSSSEG4 = STORE_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSSSEG5 = STORE_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSSSEG6 = STORE_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSSSEG7 = STORE_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSSSEG8 = STORE_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VLXSEG2 = LOAD_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLXSEG3 = LOAD_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLXSEG4 = LOAD_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLXSEG5 = LOAD_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLXSEG6 = LOAD_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLXSEG7 = LOAD_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLXSEG8 = LOAD_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSXSEG2 = STORE_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSXSEG3 = STORE_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSXSEG4 = STORE_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSXSEG5 = STORE_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSXSEG6 = STORE_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSXSEG7 = STORE_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSXSEG8 = STORE_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ // RVV Vector Arithmetic Instructions
+ VADD_FUNCT6 = 0b000000,
+ RO_V_VADD_VI = OP_IVI | (VADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADD_VV = OP_IVV | (VADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADD_VX = OP_IVX | (VADD_FUNCT6 << kRvvFunct6Shift),
+
+ VSUB_FUNCT6 = 0b000010,
+ RO_V_VSUB_VX = OP_IVX | (VSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSUB_VV = OP_IVV | (VSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VSADDU_FUNCT6 = 0b100000,
+ RO_V_VSADDU_VI = OP_IVI | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADDU_VV = OP_IVV | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADDU_VX = OP_IVX | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+
+ VSADD_FUNCT6 = 0b100001,
+ RO_V_VSADD_VI = OP_IVI | (VSADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADD_VV = OP_IVV | (VSADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADD_VX = OP_IVX | (VSADD_FUNCT6 << kRvvFunct6Shift),
+
+ VSSUB_FUNCT6 = 0b100011,
+ RO_V_VSSUB_VV = OP_IVV | (VSSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSSUB_VX = OP_IVX | (VSSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VSSUBU_FUNCT6 = 0b100010,
+ RO_V_VSSUBU_VV = OP_IVV | (VSSUBU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSSUBU_VX = OP_IVX | (VSSUBU_FUNCT6 << kRvvFunct6Shift),
+
+ VRSUB_FUNCT6 = 0b000011,
+ RO_V_VRSUB_VX = OP_IVX | (VRSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRSUB_VI = OP_IVI | (VRSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VMINU_FUNCT6 = 0b000100,
+ RO_V_VMINU_VX = OP_IVX | (VMINU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMINU_VV = OP_IVV | (VMINU_FUNCT6 << kRvvFunct6Shift),
+
+ VMIN_FUNCT6 = 0b000101,
+ RO_V_VMIN_VX = OP_IVX | (VMIN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMIN_VV = OP_IVV | (VMIN_FUNCT6 << kRvvFunct6Shift),
+
+ VMAXU_FUNCT6 = 0b000110,
+ RO_V_VMAXU_VX = OP_IVX | (VMAXU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMAXU_VV = OP_IVV | (VMAXU_FUNCT6 << kRvvFunct6Shift),
+
+ VMAX_FUNCT6 = 0b000111,
+ RO_V_VMAX_VX = OP_IVX | (VMAX_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMAX_VV = OP_IVV | (VMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VAND_FUNCT6 = 0b001001,
+ RO_V_VAND_VI = OP_IVI | (VAND_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VAND_VV = OP_IVV | (VAND_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VAND_VX = OP_IVX | (VAND_FUNCT6 << kRvvFunct6Shift),
+
+ VOR_FUNCT6 = 0b001010,
+ RO_V_VOR_VI = OP_IVI | (VOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VOR_VV = OP_IVV | (VOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VOR_VX = OP_IVX | (VOR_FUNCT6 << kRvvFunct6Shift),
+
+ VXOR_FUNCT6 = 0b001011,
+ RO_V_VXOR_VI = OP_IVI | (VXOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VXOR_VV = OP_IVV | (VXOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VXOR_VX = OP_IVX | (VXOR_FUNCT6 << kRvvFunct6Shift),
+
+ VRGATHER_FUNCT6 = 0b001100,
+ RO_V_VRGATHER_VI = OP_IVI | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRGATHER_VV = OP_IVV | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRGATHER_VX = OP_IVX | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+
+ VMV_FUNCT6 = 0b010111,
+ RO_V_VMV_VI = OP_IVI | (VMV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMV_VV = OP_IVV | (VMV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMV_VX = OP_IVX | (VMV_FUNCT6 << kRvvFunct6Shift),
+
+ RO_V_VMERGE_VI = RO_V_VMV_VI,
+ RO_V_VMERGE_VV = RO_V_VMV_VV,
+ RO_V_VMERGE_VX = RO_V_VMV_VX,
+
+ VMSEQ_FUNCT6 = 0b011000,
+ RO_V_VMSEQ_VI = OP_IVI | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSEQ_VV = OP_IVV | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSEQ_VX = OP_IVX | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+
+ VMSNE_FUNCT6 = 0b011001,
+ RO_V_VMSNE_VI = OP_IVI | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSNE_VV = OP_IVV | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSNE_VX = OP_IVX | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLTU_FUNCT6 = 0b011010,
+ RO_V_VMSLTU_VV = OP_IVV | (VMSLTU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLTU_VX = OP_IVX | (VMSLTU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLT_FUNCT6 = 0b011011,
+ RO_V_VMSLT_VV = OP_IVV | (VMSLT_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLT_VX = OP_IVX | (VMSLT_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLE_FUNCT6 = 0b011101,
+ RO_V_VMSLE_VI = OP_IVI | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLE_VV = OP_IVV | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLE_VX = OP_IVX | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLEU_FUNCT6 = 0b011100,
+ RO_V_VMSLEU_VI = OP_IVI | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLEU_VV = OP_IVV | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLEU_VX = OP_IVX | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSGTU_FUNCT6 = 0b011110,
+ RO_V_VMSGTU_VI = OP_IVI | (VMSGTU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSGTU_VX = OP_IVX | (VMSGTU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSGT_FUNCT6 = 0b011111,
+ RO_V_VMSGT_VI = OP_IVI | (VMSGT_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSGT_VX = OP_IVX | (VMSGT_FUNCT6 << kRvvFunct6Shift),
+
+ VSLIDEUP_FUNCT6 = 0b001110,
+ RO_V_VSLIDEUP_VI = OP_IVI | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLIDEUP_VX = OP_IVX | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift),
+
+ VSLIDEDOWN_FUNCT6 = 0b001111,
+ RO_V_VSLIDEDOWN_VI = OP_IVI | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLIDEDOWN_VX = OP_IVX | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift),
+
+ VSRL_FUNCT6 = 0b101000,
+ RO_V_VSRL_VI = OP_IVI | (VSRL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRL_VV = OP_IVV | (VSRL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRL_VX = OP_IVX | (VSRL_FUNCT6 << kRvvFunct6Shift),
+
+ VSLL_FUNCT6 = 0b100101,
+ RO_V_VSLL_VI = OP_IVI | (VSLL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLL_VV = OP_IVV | (VSLL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLL_VX = OP_IVX | (VSLL_FUNCT6 << kRvvFunct6Shift),
+
+ VADC_FUNCT6 = 0b010000,
+ RO_V_VADC_VI = OP_IVI | (VADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADC_VV = OP_IVV | (VADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADC_VX = OP_IVX | (VADC_FUNCT6 << kRvvFunct6Shift),
+
+ VMADC_FUNCT6 = 0b010001,
+ RO_V_VMADC_VI = OP_IVI | (VMADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMADC_VV = OP_IVV | (VMADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMADC_VX = OP_IVX | (VMADC_FUNCT6 << kRvvFunct6Shift),
+
+ VWXUNARY0_FUNCT6 = 0b010000,
+ VRXUNARY0_FUNCT6 = 0b010000,
+
+ RO_V_VWXUNARY0 = OP_MVV | (VWXUNARY0_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRXUNARY0 = OP_MVX | (VRXUNARY0_FUNCT6 << kRvvFunct6Shift),
+
+ VREDMAXU_FUNCT6 = 0b000110,
+ RO_V_VREDMAXU = OP_MVV | (VREDMAXU_FUNCT6 << kRvvFunct6Shift),
+ VREDMAX_FUNCT6 = 0b000111,
+ RO_V_VREDMAX = OP_MVV | (VREDMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VREDMINU_FUNCT6 = 0b000100,
+ RO_V_VREDMINU = OP_MVV | (VREDMINU_FUNCT6 << kRvvFunct6Shift),
+ VREDMIN_FUNCT6 = 0b000101,
+ RO_V_VREDMIN = OP_MVV | (VREDMIN_FUNCT6 << kRvvFunct6Shift),
};
// ----- Emulated conditions.
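As a quick illustration of how the RO_V_* values above are composed, here is a standalone snippet (my own, not part of the patch; kRvvFunct6Shift = 26 is defined earlier in this file, and kFunct3Shift = 12 is the standard RISC-V funct3 position):

#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint32_t OP_V = 0b1010111;  // major opcode of the V extension
  constexpr int kFunct3Shift = 12;      // funct3 occupies bits [14:12]
  constexpr int kRvvFunct6Shift = 26;   // funct6 occupies bits [31:26]

  constexpr uint32_t OP_IVV = OP_V | (0b000u << kFunct3Shift);
  constexpr uint32_t VADD_FUNCT6 = 0b000000;
  constexpr uint32_t RO_V_VADD_VV = OP_IVV | (VADD_FUNCT6 << kRvvFunct6Shift);

  static_assert((RO_V_VADD_VV & 0x7Fu) == OP_V, "major opcode must stay OP-V");
  // The vd/vs1/vs2/vm fields are OR-ed in later by GenInstrV().
  std::printf("RO_V_VADD_VV template = 0x%08X\n",
              static_cast<unsigned>(RO_V_VADD_VV));
  return 0;
}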
@@ -681,6 +1004,52 @@ enum FClassFlag {
kQuietNaN = 1 << 9
};
+#define RVV_SEW(V) \
+ V(E8) \
+ V(E16) \
+ V(E32) \
+ V(E64) \
+ V(E128) \
+ V(E256) \
+ V(E512) \
+ V(E1024)
+
+enum VSew {
+#define DEFINE_FLAG(name) name,
+ RVV_SEW(DEFINE_FLAG)
+#undef DEFINE_FLAG
+};
+
+#define RVV_LMUL(V) \
+ V(m1) \
+ V(m2) \
+ V(m4) \
+ V(m8) \
+ V(RESERVERD) \
+ V(mf8) \
+ V(mf4) \
+ V(mf2)
+
+enum Vlmul {
+#define DEFINE_FLAG(name) name,
+ RVV_LMUL(DEFINE_FLAG)
+#undef DEFINE_FLAG
+};
+
+enum TailAgnosticType {
+ ta = 0x1, // Tail agnostic
+ tu = 0x0, // Tail undisturbed
+};
+
+enum MaskAgnosticType {
+ ma = 0x1, // Mask agnostic
+ mu = 0x0, // Mask undisturbed
+};
+enum MaskType {
+ Mask = 0x0, // use the mask
+ NoMask = 0x1,
+};
+
// -----------------------------------------------------------------------------
// Hints.
@@ -734,6 +1103,19 @@ class InstructionBase {
kCAType,
kCBType,
kCJType,
+ // V extension
+ kVType,
+ kVLType,
+ kVSType,
+ kVAMOType,
+ kVIVVType,
+ kVFVVType,
+ kVMVVType,
+ kVIVIType,
+ kVIVXType,
+ kVFVFType,
+ kVMVXType,
+ kVSETType,
kUnsupported = -1
};
@@ -840,7 +1222,9 @@ class InstructionGetters : public T {
this->InstructionType() == InstructionBase::kR4Type ||
this->InstructionType() == InstructionBase::kIType ||
this->InstructionType() == InstructionBase::kSType ||
- this->InstructionType() == InstructionBase::kBType);
+ this->InstructionType() == InstructionBase::kBType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kVType);
return this->Bits(kRs1Shift + kRs1Bits - 1, kRs1Shift);
}
@@ -848,7 +1232,9 @@ class InstructionGetters : public T {
DCHECK(this->InstructionType() == InstructionBase::kRType ||
this->InstructionType() == InstructionBase::kR4Type ||
this->InstructionType() == InstructionBase::kSType ||
- this->InstructionType() == InstructionBase::kBType);
+ this->InstructionType() == InstructionBase::kBType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kVType);
return this->Bits(kRs2Shift + kRs2Bits - 1, kRs2Shift);
}
@@ -857,12 +1243,35 @@ class InstructionGetters : public T {
return this->Bits(kRs3Shift + kRs3Bits - 1, kRs3Shift);
}
+ inline int Vs1Value() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVs1Shift + kVs1Bits - 1, kVs1Shift);
+ }
+
+ inline int Vs2Value() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVs2Shift + kVs2Bits - 1, kVs2Shift);
+ }
+
+ inline int VdValue() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVdShift + kVdBits - 1, kVdShift);
+ }
+
inline int RdValue() const {
DCHECK(this->InstructionType() == InstructionBase::kRType ||
this->InstructionType() == InstructionBase::kR4Type ||
this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType ||
this->InstructionType() == InstructionBase::kUType ||
- this->InstructionType() == InstructionBase::kJType);
+ this->InstructionType() == InstructionBase::kJType ||
+ this->InstructionType() == InstructionBase::kVType);
return this->Bits(kRdShift + kRdBits - 1, kRdShift);
}
@@ -1149,6 +1558,129 @@ class InstructionGetters : public T {
return imm9 << 23 >> 23;
}
+ inline int vl_vs_width() {
+ int width = 0;
+ if ((this->InstructionBits() & kBaseOpcodeMask) != LOAD_FP &&
+ (this->InstructionBits() & kBaseOpcodeMask) != STORE_FP)
+ return -1;
+ switch (this->InstructionBits() & (kRvvWidthMask | kRvvMewMask)) {
+ case 0x0:
+ width = 8;
+ break;
+ case 0x00005000:
+ width = 16;
+ break;
+ case 0x00006000:
+ width = 32;
+ break;
+ case 0x00007000:
+ width = 64;
+ break;
+ case 0x10000000:
+ width = 128;
+ break;
+ case 0x10005000:
+ width = 256;
+ break;
+ case 0x10006000:
+ width = 512;
+ break;
+ case 0x10007000:
+ width = 1024;
+ break;
+ default:
+ width = -1;
+ break;
+ }
+ return width;
+ }
+
+ inline uint32_t Rvvzimm() const {
+ if ((this->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0x80000000)) == RO_V_VSETVLI) {
+ uint32_t Bits = this->InstructionBits();
+ uint32_t zimm = Bits & kRvvZimmMask;
+ return zimm >> kRvvZimmShift;
+ } else {
+ DCHECK_EQ(this->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0xC0000000),
+ RO_V_VSETIVLI);
+ uint32_t Bits = this->InstructionBits();
+ uint32_t zimm = Bits & kRvvZimmMask;
+ return (zimm >> kRvvZimmShift) & 0x3FF;
+ }
+ }
+
+ inline uint32_t Rvvuimm() const {
+ DCHECK_EQ(
+ this->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask | 0xC0000000),
+ RO_V_VSETIVLI);
+ uint32_t Bits = this->InstructionBits();
+ uint32_t uimm = Bits & kRvvUimmMask;
+ return uimm >> kRvvUimmShift;
+ }
+
+ inline uint32_t RvvVsew() const {
+ uint32_t zimm = this->Rvvzimm();
+ uint32_t vsew = (zimm >> 3) & 0x7;
+ return vsew;
+ }
+
+ inline uint32_t RvvVlmul() const {
+ uint32_t zimm = this->Rvvzimm();
+ uint32_t vlmul = zimm & 0x7;
+ return vlmul;
+ }
+
+ inline uint8_t RvvVM() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kRvvVmShift + kRvvVmBits - 1, kRvvVmShift);
+ }
+
+ inline const char* RvvSEW() const {
+ uint32_t vsew = this->RvvVsew();
+ switch (vsew) {
+#define CAST_VSEW(name) \
+ case name: \
+ return #name;
+ RVV_SEW(CAST_VSEW)
+ default:
+ return "unknown";
+#undef CAST_VSEW
+ }
+ }
+
+ inline const char* RvvLMUL() const {
+ uint32_t vlmul = this->RvvVlmul();
+ switch (vlmul) {
+#define CAST_VLMUL(name) \
+ case name: \
+ return #name;
+ RVV_LMUL(CAST_VLMUL)
+ default:
+ return "unknown";
+#undef CAST_VLMUL
+ }
+ }
+
+#define sext(x, len) (((int32_t)(x) << (32 - len)) >> (32 - len))
+#define zext(x, len) (((uint32_t)(x) << (32 - len)) >> (32 - len))
+
+ inline int32_t RvvSimm5() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType);
+ return sext(this->Bits(kRvvImm5Shift + kRvvImm5Bits - 1, kRvvImm5Shift),
+ kRvvImm5Bits);
+ }
+
+ inline uint32_t RvvUimm5() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType);
+ uint32_t imm = this->Bits(kRvvImm5Shift + kRvvImm5Bits - 1, kRvvImm5Shift);
+ return zext(imm, kRvvImm5Bits);
+ }
+#undef sext
+#undef zext
inline bool AqValue() const { return this->Bits(kAqShift, kAqShift); }
inline bool RlValue() const { return this->Bits(kRlShift, kRlShift); }
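A standalone sketch (not part of the patch) of how Rvvzimm(), RvvVsew() and RvvVlmul() above pick the vtype fields apart: zimm sits in bits [30:20] of vsetvli, with vlmul in zimm[2:0] and vsew in zimm[5:3]:

#include <cstdint>
#include <cstdio>

int main() {
  constexpr int kRvvZimmShift = 20;
  constexpr uint32_t kRvvZimmMask = ((1u << 11) - 1) << kRvvZimmShift;

  // Hypothetical vsetvli word carrying vsew = E32 (0b010) and vlmul = m2
  // (0b001); opcode/rd/rs1 bits are left out since only zimm matters here.
  uint32_t zimm_field = (0b010u << 3) | 0b001u;
  uint32_t instr = zimm_field << kRvvZimmShift;

  uint32_t zimm = (instr & kRvvZimmMask) >> kRvvZimmShift;  // Rvvzimm()
  uint32_t vsew = (zimm >> 3) & 0x7;                        // RvvVsew()
  uint32_t vlmul = zimm & 0x7;                              // RvvVlmul()
  std::printf("vsew=%u (E%u), vlmul=%u\n", vsew, 8u << vsew, vlmul);
  return 0;
}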
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index 3baa71d1a2..0e0d8bda5a 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -1057,7 +1057,10 @@ void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
// ------------Pseudo-instructions-------------
// Change endianness
-void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size) {
+void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size,
+ Register scratch) {
+ DCHECK_NE(scratch, rs);
+ DCHECK_NE(scratch, rd);
DCHECK(operand_size == 4 || operand_size == 8);
if (operand_size == 4) {
// Uint32_t x1 = 0x00FF00FF;
@@ -1068,7 +1071,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size) {
DCHECK((rd != t6) && (rs != t6));
Register x0 = temps.Acquire();
Register x1 = temps.Acquire();
- Register x2 = temps.Acquire();
+ Register x2 = scratch;
li(x1, 0x00FF00FF);
slliw(x0, rs, 16);
srliw(rd, rs, 16);
@@ -1090,7 +1093,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size) {
DCHECK((rd != t6) && (rs != t6));
Register x0 = temps.Acquire();
Register x1 = temps.Acquire();
- Register x2 = temps.Acquire();
+ Register x2 = scratch;
li(x1, 0x0000FFFF0000FFFFl);
slli(x0, rs, 32);
srli(rd, rs, 32);
@@ -1193,20 +1196,19 @@ void TurboAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) {
}
template <int NBYTES>
-void TurboAssembler::UnalignedFLoadHelper(FPURegister frd,
- const MemOperand& rs) {
+void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
+ Register scratch_base) {
DCHECK(NBYTES == 4 || NBYTES == 8);
-
+ DCHECK_NE(scratch_base, rs.rm());
BlockTrampolinePoolScope block_trampoline_pool(this);
MemOperand source = rs;
- UseScratchRegisterScope temps(this);
- Register scratch_base = temps.Acquire();
if (NeedAdjustBaseAndOffset(rs, OffsetAccessType::TWO_ACCESSES, NBYTES - 1)) {
// Adjust offset for two accesses and check if offset + 3 fits into int12.
DCHECK(scratch_base != rs.rm());
AdjustBaseAndOffset(&source, scratch_base, OffsetAccessType::TWO_ACCESSES,
NBYTES - 1);
}
+ UseScratchRegisterScope temps(this);
Register scratch_other = temps.Acquire();
Register scratch = temps.Acquire();
DCHECK(scratch != rs.rm() && scratch_other != scratch &&
@@ -1258,10 +1260,10 @@ void TurboAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs,
template <int NBYTES>
void TurboAssembler::UnalignedFStoreHelper(FPURegister frd,
- const MemOperand& rs) {
+ const MemOperand& rs,
+ Register scratch) {
DCHECK(NBYTES == 8 || NBYTES == 4);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
+ DCHECK_NE(scratch, rs.rm());
if (NBYTES == 4) {
fmv_x_w(scratch, frd);
} else {
@@ -1354,20 +1356,28 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs) {
Sw(scratch, MemOperand(rs.rm(), rs.offset() + kSystemPointerSize / 2));
}
-void TurboAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs) {
- UnalignedFLoadHelper<4>(fd, rs);
+void TurboAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK_NE(scratch, rs.rm());
+ UnalignedFLoadHelper<4>(fd, rs, scratch);
}
-void TurboAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs) {
- UnalignedFStoreHelper<4>(fd, rs);
+void TurboAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK_NE(scratch, rs.rm());
+ UnalignedFStoreHelper<4>(fd, rs, scratch);
}
-void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs) {
- UnalignedFLoadHelper<8>(fd, rs);
+void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK_NE(scratch, rs.rm());
+ UnalignedFLoadHelper<8>(fd, rs, scratch);
}
-void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs) {
- UnalignedFStoreHelper<8>(fd, rs);
+void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK_NE(scratch, rs.rm());
+ UnalignedFStoreHelper<8>(fd, rs, scratch);
}
void TurboAssembler::Lb(Register rd, const MemOperand& rs) {
@@ -1664,8 +1674,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
BlockGrowBufferScope block_growbuffer(this);
int offset = pc_offset();
Address address = j.immediate();
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, address));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, address);
Handle<HeapObject> object(reinterpret_cast<Address*>(address));
int64_t immediate = object->ptr();
RecordRelocInfo(j.rmode(), immediate);
@@ -2442,7 +2451,6 @@ void TurboAssembler::CompareI(Register rd, Register rs, const Operand& rt,
break;
case cc_always:
UNREACHABLE();
- break;
default:
UNREACHABLE();
}
@@ -2620,7 +2628,9 @@ void TurboAssembler::Ctz64(Register rd, Register rs) {
}
}
-void TurboAssembler::Popcnt32(Register rd, Register rs) {
+void TurboAssembler::Popcnt32(Register rd, Register rs, Register scratch) {
+ DCHECK_NE(scratch, rs);
+ DCHECK_NE(scratch, rd);
// https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
//
// A generalization of the best bit counting method to integers of
@@ -2644,7 +2654,6 @@ void TurboAssembler::Popcnt32(Register rd, Register rs) {
uint32_t shift = 24;
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
- Register scratch = temps.Acquire();
Register scratch2 = temps.Acquire();
Register value = temps.Acquire();
DCHECK((rd != value) && (rs != value));
@@ -2669,7 +2678,9 @@ void TurboAssembler::Popcnt32(Register rd, Register rs) {
Srl32(rd, rd, shift);
}
-void TurboAssembler::Popcnt64(Register rd, Register rs) {
+void TurboAssembler::Popcnt64(Register rd, Register rs, Register scratch) {
+ DCHECK_NE(scratch, rs);
+ DCHECK_NE(scratch, rd);
// uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
// uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
// uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
@@ -2679,7 +2690,6 @@ void TurboAssembler::Popcnt64(Register rd, Register rs) {
uint64_t shift = 24;
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
- Register scratch = temps.Acquire();
Register scratch2 = temps.Acquire();
Register value = temps.Acquire();
DCHECK((rd != value) && (rs != value));
@@ -3006,7 +3016,6 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
DCHECK_EQ(offset, 0);
return BranchShortHelper(0, L, cond, rs, rt);
}
- return false;
}
void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
@@ -3122,7 +3131,6 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
DCHECK_EQ(offset, 0);
return BranchAndLinkShortHelper(0, L, cond, rs, rt);
}
- return false;
}
void TurboAssembler::LoadFromConstantsTable(Register destination,
@@ -3549,9 +3557,9 @@ void MacroAssembler::PushStackHandler() {
// Link the current handler as the next handler.
UseScratchRegisterScope temps(this);
Register handler_address = temps.Acquire();
- Register handler = temps.Acquire();
li(handler_address,
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
+ Register handler = temps.Acquire();
Ld(handler, MemOperand(handler_address));
push(handler);
@@ -3813,18 +3821,19 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
Register expected_parameter_count = a2;
- UseScratchRegisterScope temps(this);
- Register temp_reg = temps.Acquire();
- LoadTaggedPointerField(
- temp_reg,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- LoadTaggedPointerField(cp,
- FieldMemOperand(function, JSFunction::kContextOffset));
- // The argument count is stored as uint16_t
- Lhu(expected_parameter_count,
- FieldMemOperand(temp_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
-
+ {
+ UseScratchRegisterScope temps(this);
+ Register temp_reg = temps.Acquire();
+ LoadTaggedPointerField(
+ temp_reg,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ LoadTaggedPointerField(
+ cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ // The argument count is stored as uint16_t
+ Lhu(expected_parameter_count,
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ }
InvokeFunctionCode(function, new_target, expected_parameter_count,
actual_parameter_count, type);
}
@@ -3861,7 +3870,74 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
Sub64(range, type_reg, Operand(lower_limit));
}
-
+//------------------------------------------------------------------------------
+// Wasm
+void TurboAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmseq_vv(v0, lhs, rhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmsne_vv(v0, lhs, rhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmsle_vv(v0, rhs, lhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmsleu_vv(v0, rhs, lhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmslt_vv(v0, rhs, lhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmsltu_vv(v0, rhs, lhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
+ uint64_t imm1 = *(reinterpret_cast<const uint64_t*>(imms));
+ uint64_t imm2 = *((reinterpret_cast<const uint64_t*>(imms)) + 1);
+ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ li(kScratchReg, 1);
+ vmv_vx(v0, kScratchReg);
+ li(kScratchReg, imm1);
+ vmerge_vx(dst, kScratchReg, dst);
+ li(kScratchReg, imm2);
+ vsll_vi(v0, v0, 1);
+ vmerge_vx(dst, kScratchReg, dst);
+}
// -----------------------------------------------------------------------------
// Runtime calls.
@@ -4743,10 +4819,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- li(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
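The WasmRvv* comparison helpers above all follow one pattern: compare into the mask register v0, zero the destination, then vmerge -1 over the lanes where the mask is set. A scalar reference for that lane semantics (illustrative only, assuming four i32 lanes):

#include <array>
#include <cstdint>
#include <cstdio>

std::array<int32_t, 4> RvvEqReference(const std::array<int32_t, 4>& lhs,
                                      const std::array<int32_t, 4>& rhs) {
  std::array<bool, 4> mask;                        // vmseq.vv v0, lhs, rhs
  for (int i = 0; i < 4; i++) mask[i] = lhs[i] == rhs[i];
  std::array<int32_t, 4> dst{};                    // vmv.vx dst, zero
  for (int i = 0; i < 4; i++)
    if (mask[i]) dst[i] = -1;                      // vmerge.vx dst, -1, dst
  return dst;
}

int main() {
  auto r = RvvEqReference({1, 2, 3, 4}, {1, 0, 3, 0});
  std::printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]);  // -1 0 -1 0
  return 0;
}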
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index 04285916bc..53e8543429 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -151,6 +151,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Branch(Label* target);
void Branch(int32_t target);
+ void BranchLong(Label* L);
void Branch(Label* target, Condition cond, Register r1, const Operand& r2,
Label::Distance near_jump = Label::kFar);
void Branch(int32_t target, Condition cond, Register r1, const Operand& r2,
@@ -570,8 +571,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Clz64(Register rd, Register rs);
void Ctz32(Register rd, Register rs);
void Ctz64(Register rd, Register rs);
- void Popcnt32(Register rd, Register rs);
- void Popcnt64(Register rd, Register rs);
+ void Popcnt32(Register rd, Register rs, Register scratch);
+ void Popcnt64(Register rd, Register rs, Register scratch);
// Bit field starts at bit pos and extending for size bits is extracted from
// rs and stored zero/sign-extended and right-justified in rt
@@ -590,7 +591,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Neg_d(FPURegister fd, FPURegister fs);
// Change endianness
- void ByteSwap(Register dest, Register src, int operand_size);
+ void ByteSwap(Register dest, Register src, int operand_size,
+ Register scratch);
void Clear_if_nan_d(Register rd, FPURegister fs);
void Clear_if_nan_s(Register rd, FPURegister fs);
@@ -605,9 +607,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch_other = no_reg);
template <int NBYTES>
- void UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs);
+ void UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
+ Register scratch);
template <int NBYTES>
- void UnalignedFStoreHelper(FPURegister frd, const MemOperand& rs);
+ void UnalignedFStoreHelper(FPURegister frd, const MemOperand& rs,
+ Register scratch);
template <typename Reg_T, typename Func>
void AlignedLoadHelper(Reg_T target, const MemOperand& rs, Func generator);
@@ -631,11 +635,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Uld(Register rd, const MemOperand& rs);
void Usd(Register rd, const MemOperand& rs);
- void ULoadFloat(FPURegister fd, const MemOperand& rs);
- void UStoreFloat(FPURegister fd, const MemOperand& rs);
+ void ULoadFloat(FPURegister fd, const MemOperand& rs, Register scratch);
+ void UStoreFloat(FPURegister fd, const MemOperand& rs, Register scratch);
- void ULoadDouble(FPURegister fd, const MemOperand& rs);
- void UStoreDouble(FPURegister fd, const MemOperand& rs);
+ void ULoadDouble(FPURegister fd, const MemOperand& rs, Register scratch);
+ void UStoreDouble(FPURegister fd, const MemOperand& rs, Register scratch);
void Lb(Register rd, const MemOperand& rs);
void Lbu(Register rd, const MemOperand& rs);
@@ -857,8 +861,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -908,6 +910,31 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Sub64(rd, rs1, rs2);
}
}
+ // Wasm SIMD helpers lowered to RVV.
+ void WasmRvvExtractLane(Register dst, VRegister src, int8_t idx, VSew sew,
+ Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ VRegister Vsrc = idx != 0 ? kSimd128ScratchReg : src;
+ if (idx != 0) {
+ vslidedown_vi(kSimd128ScratchReg, src, idx);
+ }
+ vmv_xs(dst, Vsrc);
+ }
+
+ void WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+
+ void WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvS128const(VRegister dst, const uint8_t imms[16]);
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
@@ -945,7 +972,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register rs, const Operand& rt);
bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt);
- void BranchLong(Label* L);
void BranchAndLinkLong(Label* L);
template <typename F_TYPE>
diff --git a/deps/v8/src/codegen/riscv64/register-riscv64.h b/deps/v8/src/codegen/riscv64/register-riscv64.h
index 69654a4f54..2d2fccdf3a 100644
--- a/deps/v8/src/codegen/riscv64/register-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/register-riscv64.h
@@ -49,16 +49,16 @@ namespace internal {
V(fs8) V(fs9) V(fs10) V(fs11) V(ft8) V(ft9) V(ft10) V(ft11)
#define FLOAT_REGISTERS DOUBLE_REGISTERS
-#define SIMD128_REGISTERS(V) \
- V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
- V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
- V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
- V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
+#define VECTOR_REGISTERS(V) \
+ V(v0) V(v1) V(v2) V(v3) V(v4) V(v5) V(v6) V(v7) \
+ V(v8) V(v9) V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) \
+ V(v16) V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
+ V(v24) V(v25) V(v26) V(v27) V(v28) V(v29) V(v30) V(v31)
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(ft0) V(ft1) V(ft2) V(ft3) \
- V(ft4) V(ft5) V(ft6) V(ft7) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \
- V(fa6) V(fa7)
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) V(ft8) \
+ V(ft9) V(ft10) V(ft11) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \
+ V(fa6) V(fa7)
// Returns the number of padding slots needed for stack pointer alignment.
constexpr int ArgumentPaddingSlots(int argument_count) {
@@ -256,6 +256,19 @@ enum DoubleRegisterCode {
kDoubleAfterLast
};
+enum VRegisterCode {
+#define REGISTER_CODE(R) kVRCode_##R,
+ VECTOR_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kVRAfterLast
+};
+class VRegister : public RegisterBase<VRegister, kVRAfterLast> {
+ friend class RegisterBase;
+
+ public:
+ explicit constexpr VRegister(int code) : RegisterBase(code) {}
+};
+
// Coprocessor register.
class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
public:
@@ -274,25 +287,24 @@ class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
return FPURegister::from_code(code() + 1);
}
+ // FIXME(riscv64): In RVV, vector registers are distinct from floating-point
+ // registers. For now this CL assumes the two register files are shared to
+ // keep the change small.
+ VRegister toV() const {
+ DCHECK(base::IsInRange(code(), 0, kVRAfterLast - 1));
+ // FIXME(riscv): v0 is the special mask register and cannot be allocated,
+ // so map it to the unallocated v8 instead.
+ if (code() == 0) {
+ return VRegister(8);
+ }
+ return VRegister(code());
+ }
+
private:
friend class RegisterBase;
explicit constexpr FPURegister(int code) : RegisterBase(code) {}
};
-enum MSARegisterCode {
-#define REGISTER_CODE(R) kMsaCode_##R,
- SIMD128_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kMsaAfterLast
-};
-
-// MIPS SIMD (MSA) register
-// TODO(RISCV): Remove MIPS MSA registers.
-// https://github.com/v8-riscv/v8/issues/429
-class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
- friend class RegisterBase;
- explicit constexpr MSARegister(int code) : RegisterBase(code) {}
-};
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0.
@@ -304,6 +316,8 @@ using FloatRegister = FPURegister;
using DoubleRegister = FPURegister;
+using Simd128Register = VRegister;
+
#define DECLARE_DOUBLE_REGISTER(R) \
constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R);
DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
@@ -311,15 +325,12 @@ DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
-// SIMD registers.
-using Simd128Register = MSARegister;
-
-#define DECLARE_SIMD128_REGISTER(R) \
- constexpr Simd128Register R = Simd128Register::from_code(kMsaCode_##R);
-SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
-#undef DECLARE_SIMD128_REGISTER
+#define DECLARE_VECTOR_REGISTER(R) \
+ constexpr VRegister R = VRegister::from_code(kVRCode_##R);
+VECTOR_REGISTERS(DECLARE_VECTOR_REGISTER)
+#undef DECLARE_VECTOR_REGISTER
-const Simd128Register no_msareg = Simd128Register::no_reg();
+const VRegister no_msareg = VRegister::no_reg();
// Register aliases.
// cp is assumed to be a callee saved register.
@@ -328,14 +339,14 @@ constexpr Register cp = s7;
constexpr Register kScratchReg = s3;
constexpr Register kScratchReg2 = s4;
-constexpr DoubleRegister kScratchDoubleReg = fs11;
+constexpr DoubleRegister kScratchDoubleReg = ft0;
constexpr DoubleRegister kDoubleRegZero = fs9;
// Define {RegisterName} methods for the register types.
DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS)
-DEFINE_REGISTER_NAMES(MSARegister, SIMD128_REGISTERS)
+DEFINE_REGISTER_NAMES(VRegister, VECTOR_REGISTERS)
// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = a0;
@@ -344,7 +355,6 @@ constexpr Register kReturnRegister2 = a2;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a1;
-constexpr Register kSpeculationPoisonRegister = a7;
constexpr Register kInterpreterAccumulatorRegister = a0;
constexpr Register kInterpreterBytecodeOffsetRegister = t0;
constexpr Register kInterpreterBytecodeArrayRegister = t1;
@@ -364,6 +374,9 @@ constexpr Register kWasmInstanceRegister = a0;
constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
constexpr DoubleRegister kFPReturnRegister0 = fa0;
+constexpr VRegister kSimd128ScratchReg = v27;
+constexpr VRegister kSimd128ScratchReg2 = v26;
+constexpr VRegister kSimd128RegZero = v25;
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
constexpr Register kPtrComprCageBaseRegister = s11; // callee save
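A tiny standalone sketch of the FPURegister::toV() remapping above: code 0 (ft0) goes to v8 because v0 is reserved as the RVV mask register, every other code keeps its number (fa0 is f10 in the standard RISC-V ABI):

#include <cstdio>

// Mirrors FPURegister::toV(): v0 is the mask register and v8 is unallocated,
// so fp register code 0 is remapped to vector register 8.
int ToVCode(int fpu_code) { return fpu_code == 0 ? 8 : fpu_code; }

int main() {
  std::printf("ft0 -> v%d, fa0 (f10) -> v%d\n", ToVCode(0), ToVCode(10));
  return 0;
}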
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index 511096e0db..e799f8e8a4 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -440,7 +440,6 @@ Condition Assembler::GetCondition(Instr instr) {
default:
UNIMPLEMENTED();
}
- return al;
}
#if V8_TARGET_ARCH_S390X
diff --git a/deps/v8/src/codegen/s390/constants-s390.h b/deps/v8/src/codegen/s390/constants-s390.h
index b16963e52a..23e77c93d7 100644
--- a/deps/v8/src/codegen/s390/constants-s390.h
+++ b/deps/v8/src/codegen/s390/constants-s390.h
@@ -1553,14 +1553,28 @@ using SixByteInstr = uint64_t;
V(vlrep, VLREP, 0xE705) /* type = VRX VECTOR LOAD AND REPLICATE */ \
V(vl, VL, 0xE706) /* type = VRX VECTOR LOAD */ \
V(vlbb, VLBB, 0xE707) /* type = VRX VECTOR LOAD TO BLOCK BOUNDARY */ \
+ V(vlbr, VLBR, 0xE606) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENTS */ \
+ V(vlbrrep, VLBRREP, \
+ 0xE605) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT AND REPLICATE */ \
+ V(vlebrh, VLEBRH, \
+ 0xE601) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT (16) */ \
+ V(vlebrf, VLEBRF, \
+ 0xE603) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT (32) */ \
+ V(vlebrg, VLEBRG, \
+ 0xE602) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT (64) */ \
V(vsteb, VSTEB, 0xE708) /* type = VRX VECTOR STORE ELEMENT (8) */ \
V(vsteh, VSTEH, 0xE709) /* type = VRX VECTOR STORE ELEMENT (16) */ \
V(vsteg, VSTEG, 0xE70A) /* type = VRX VECTOR STORE ELEMENT (64) */ \
V(vstef, VSTEF, 0xE70B) /* type = VRX VECTOR STORE ELEMENT (32) */ \
V(vst, VST, 0xE70E) /* type = VRX VECTOR STORE */ \
- V(vlbr, VLBR, 0xE606) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENTS */ \
- V(vstbr, VSTBR, 0xE60E) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENTS \
- */
+ V(vstbr, VSTBR, \
+ 0xE60E) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENTS */ \
+ V(vstebrh, VSTEBRH, \
+ 0xE609) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENT (16) */ \
+ V(vstebrf, VSTEBRF, \
+ 0xE60B) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENT (32) */ \
+ V(vstebrg, VSTEBRG, \
+ 0xE60A) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENT (64) */
#define S390_RIE_G_OPCODE_LIST(V) \
V(lochi, LOCHI, \
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 4de7f2cf4b..a6c55746f8 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -1184,7 +1184,6 @@ void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1193,7 +1192,6 @@ void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
cgebr(m, dst, double_input);
}
@@ -1208,7 +1206,6 @@ void TurboAssembler::ConvertDoubleToInt64(const Register dst,
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1217,7 +1214,6 @@ void TurboAssembler::ConvertDoubleToInt64(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
cgdbr(m, dst, double_input);
}
@@ -1241,7 +1237,6 @@ void TurboAssembler::ConvertDoubleToInt32(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(dst, Operand::Zero());
@@ -1268,7 +1263,6 @@ void TurboAssembler::ConvertFloat32ToInt32(const Register result,
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(result, Operand::Zero());
@@ -1286,7 +1280,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1295,7 +1288,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32(
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(result, Operand::Zero());
@@ -1313,7 +1305,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt64(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1322,7 +1313,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt64(
break;
default:
UNIMPLEMENTED();
- break;
}
clgebr(m, Condition(0), result, double_input);
}
@@ -1337,7 +1327,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1346,7 +1335,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64(
break;
default:
UNIMPLEMENTED();
- break;
}
clgdbr(m, Condition(0), dst, double_input);
}
@@ -1361,7 +1349,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1370,7 +1357,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32(
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(dst, Operand::Zero());
@@ -3924,6 +3910,125 @@ void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
}
}
+// Vector LE Load and Transform instructions.
+void TurboAssembler::LoadAndSplat8x16LE(Simd128Register dst,
+ const MemOperand& mem) {
+ vlrep(dst, mem, Condition(0));
+}
+#define LOAD_SPLAT_LIST(V) \
+ V(64x2, LoadU64LE, 3) \
+ V(32x4, LoadU32LE, 2) \
+ V(16x8, LoadU16LE, 1)
+
+#define LOAD_SPLAT(name, scalar_instr, condition) \
+ void TurboAssembler::LoadAndSplat##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && \
+ is_uint12(mem.offset())) { \
+ vlbrrep(dst, mem, Condition(condition)); \
+ return; \
+ } \
+ scalar_instr(r1, mem); \
+ vlvg(dst, r1, MemOperand(r0, 0), Condition(condition)); \
+ vrep(dst, dst, Operand(0), Condition(condition)); \
+ }
+LOAD_SPLAT_LIST(LOAD_SPLAT)
+#undef LOAD_SPLAT
+#undef LOAD_SPLAT_LIST
+
+#define LOAD_EXTEND_LIST(V) \
+ V(32x2U, vuplh, 2) \
+ V(32x2S, vuph, 2) \
+ V(16x4U, vuplh, 1) \
+ V(16x4S, vuph, 1) \
+ V(8x8U, vuplh, 0) \
+ V(8x8S, vuph, 0)
+
+#define LOAD_EXTEND(name, unpack_instr, condition) \
+ void TurboAssembler::LoadAndExtend##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && \
+ is_uint12(mem.offset())) { \
+ vlebrg(kScratchDoubleReg, mem, Condition(0)); \
+ } else { \
+ LoadU64LE(r1, mem); \
+ vlvg(kScratchDoubleReg, r1, MemOperand(r0, 0), Condition(3)); \
+ } \
+ unpack_instr(dst, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(condition)); \
+ }
+LOAD_EXTEND_LIST(LOAD_EXTEND)
+#undef LOAD_EXTEND
+#undef LOAD_EXTEND_LIST
+
+void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ vlebrf(dst, mem, Condition(3));
+ return;
+ }
+ LoadU32LE(r1, mem);
+ vlvg(dst, r1, MemOperand(r0, 3), Condition(2));
+}
+
+void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ vlebrg(dst, mem, Condition(1));
+ return;
+ }
+ LoadU64LE(r1, mem);
+ vlvg(dst, r1, MemOperand(r0, 1), Condition(3));
+}
+
+void TurboAssembler::LoadLane8LE(Simd128Register dst, const MemOperand& mem,
+ int index) {
+ vleb(dst, mem, Condition(index));
+}
+#define LOAD_LANE_LIST(V) \
+ V(64, vlebrg, LoadU64LE, 3) \
+ V(32, vlebrf, LoadU32LE, 2) \
+ V(16, vlebrh, LoadU16LE, 1)
+
+#define LOAD_LANE(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \
+ const MemOperand& mem, int lane) { \
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && \
+ is_uint12(mem.offset())) { \
+ vector_instr(dst, mem, Condition(lane)); \
+ return; \
+ } \
+ scalar_instr(r1, mem); \
+ vlvg(dst, r1, MemOperand(r0, lane), Condition(condition)); \
+ }
+LOAD_LANE_LIST(LOAD_LANE)
+#undef LOAD_LANE
+#undef LOAD_LANE_LIST
+
+void TurboAssembler::StoreLane8LE(Simd128Register src, const MemOperand& mem,
+ int index) {
+ vsteb(src, mem, Condition(index));
+}
+#define STORE_LANE_LIST(V) \
+ V(64, vstebrg, StoreU64LE, 3) \
+ V(32, vstebrf, StoreU32LE, 2) \
+ V(16, vstebrh, StoreU16LE, 1)
+
+#define STORE_LANE(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
+ const MemOperand& mem, int lane) { \
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && \
+ is_uint12(mem.offset())) { \
+ vector_instr(src, mem, Condition(lane)); \
+ return; \
+ } \
+ vlgv(r1, src, MemOperand(r0, lane), Condition(condition)); \
+ scalar_instr(r1, mem); \
+ }
+STORE_LANE_LIST(STORE_LANE)
+#undef STORE_LANE
+#undef STORE_LANE_LIST
+
#else
void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem,
Register scratch) {
@@ -3996,6 +4101,83 @@ void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
StoreV128(src, mem, scratch1);
}
+// Vector LE Load and Transform instructions.
+#define LOAD_SPLAT_LIST(V) \
+ V(64x2, 3) \
+ V(32x4, 2) \
+ V(16x8, 1) \
+ V(8x16, 0)
+
+#define LOAD_SPLAT(name, condition) \
+ void TurboAssembler::LoadAndSplat##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ vlrep(dst, mem, Condition(condition)); \
+ }
+LOAD_SPLAT_LIST(LOAD_SPLAT)
+#undef LOAD_SPLAT
+#undef LOAD_SPLAT_LIST
+
+#define LOAD_EXTEND_LIST(V) \
+ V(32x2U, vuplh, 2) \
+ V(32x2S, vuph, 2) \
+ V(16x4U, vuplh, 1) \
+ V(16x4S, vuph, 1) \
+ V(8x8U, vuplh, 0) \
+ V(8x8S, vuph, 0)
+
+#define LOAD_EXTEND(name, unpack_instr, condition) \
+ void TurboAssembler::LoadAndExtend##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ vleg(kScratchDoubleReg, mem, Condition(0)); \
+ unpack_instr(dst, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(condition)); \
+ }
+LOAD_EXTEND_LIST(LOAD_EXTEND)
+#undef LOAD_EXTEND
+#undef LOAD_EXTEND_LIST
+
+void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ vlef(dst, mem, Condition(3));
+}
+
+void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ vleg(dst, mem, Condition(1));
+}
+
+#define LOAD_LANE_LIST(V) \
+ V(64, vleg) \
+ V(32, vlef) \
+ V(16, vleh) \
+ V(8, vleb)
+
+#define LOAD_LANE(name, vector_instr) \
+ void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \
+ const MemOperand& mem, int lane) { \
+ DCHECK(is_uint12(mem.offset())); \
+ vector_instr(dst, mem, Condition(lane)); \
+ }
+LOAD_LANE_LIST(LOAD_LANE)
+#undef LOAD_LANE
+#undef LOAD_LANE_LIST
+
+#define STORE_LANE_LIST(V) \
+ V(64, vsteg) \
+ V(32, vstef) \
+ V(16, vsteh) \
+ V(8, vsteb)
+
+#define STORE_LANE(name, vector_instr) \
+ void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
+ const MemOperand& mem, int lane) { \
+ DCHECK(is_uint12(mem.offset())); \
+ vector_instr(src, mem, Condition(lane)); \
+ }
+STORE_LANE_LIST(STORE_LANE)
+#undef STORE_LANE
+#undef STORE_LANE_LIST
+
#endif
// Load And Test (Reg <- Reg)
@@ -4670,10 +4852,6 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
lay(sp, MemOperand(sp, kSimd128Size));
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- mov(kSpeculationPoisonRegister, Operand(-1));
-}
-
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
larl(dst, Operand(-pc_offset() / 2));
}
@@ -5276,7 +5454,37 @@ SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C)
#undef EMIT_SIMD_BINOP_VRR_C
#undef SIMD_BINOP_LIST_VRR_C
-// Opcodes without a 1-1 match.
+#define SIMD_SHIFT_LIST(V) \
+ V(I64x2Shl, veslv, 3) \
+ V(I64x2ShrS, vesrav, 3) \
+ V(I64x2ShrU, vesrlv, 3) \
+ V(I32x4Shl, veslv, 2) \
+ V(I32x4ShrS, vesrav, 2) \
+ V(I32x4ShrU, vesrlv, 2) \
+ V(I16x8Shl, veslv, 1) \
+ V(I16x8ShrS, vesrav, 1) \
+ V(I16x8ShrU, vesrlv, 1) \
+ V(I8x16Shl, veslv, 0) \
+ V(I8x16ShrS, vesrav, 0) \
+ V(I8x16ShrU, vesrlv, 0)
+
+#define EMIT_SIMD_SHIFT(name, op, c1) \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ Register src2) { \
+ vlvg(kScratchDoubleReg, src2, MemOperand(r0, 0), Condition(c1)); \
+ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(c1)); \
+ op(dst, src1, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(c1)); \
+ } \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ const Operand& src2) { \
+ mov(ip, src2); \
+ name(dst, src1, ip); \
+ }
+SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
+#undef EMIT_SIMD_SHIFT
+#undef SIMD_SHIFT_LIST
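+// Each EMIT_SIMD_SHIFT entry above emits two overloads: one taking the shift
+// amount in a Register, which is broadcast into every lane of
+// kScratchDoubleReg (vlvg + vrep) before the element-wise shift, and one
+// taking an immediate Operand, which is first moved into ip and forwarded to
+// the Register overload.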
+
void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
Register scratch_1 = r0;
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index 51cdb48326..b7123d5960 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -392,6 +392,27 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch1);
void LoadF64LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
void LoadF32LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
+ // Vector LE Load and Transform instructions.
+ void LoadAndSplat64x2LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndSplat32x4LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndSplat16x8LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndSplat8x16LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend8x8ULE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend8x8SLE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend16x4ULE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend16x4SLE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend32x2ULE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend32x2SLE(Simd128Register dst, const MemOperand& mem);
+ void LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem);
+ void LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem);
+ void LoadLane8LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void LoadLane16LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void LoadLane32LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void LoadLane64LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void StoreLane8LE(Simd128Register src, const MemOperand& mem, int lane);
+ void StoreLane16LE(Simd128Register src, const MemOperand& mem, int lane);
+ void StoreLane32LE(Simd128Register src, const MemOperand& mem, int lane);
+ void StoreLane64LE(Simd128Register src, const MemOperand& mem, int lane);
// Load And Test
void LoadAndTest32(Register dst, Register src);
@@ -1015,7 +1036,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
- void ResetSpeculationPoisonRegister();
void ComputeCodeStartAddress(Register dst);
void LoadPC(Register dst);
@@ -1071,75 +1091,99 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx);
-#define SIMD_BINOP_LIST(V) \
- V(F64x2Add) \
- V(F64x2Sub) \
- V(F64x2Mul) \
- V(F64x2Div) \
- V(F64x2Min) \
- V(F64x2Max) \
- V(F64x2Eq) \
- V(F64x2Ne) \
- V(F64x2Lt) \
- V(F64x2Le) \
- V(F32x4Add) \
- V(F32x4Sub) \
- V(F32x4Mul) \
- V(F32x4Div) \
- V(F32x4Min) \
- V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I64x2Add) \
- V(I64x2Sub) \
- V(I64x2Mul) \
- V(I64x2Eq) \
- V(I64x2Ne) \
- V(I64x2GtS) \
- V(I64x2GeS) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4GtU) \
- V(I32x4GeU) \
- V(I32x4MinS) \
- V(I32x4MinU) \
- V(I32x4MaxS) \
- V(I32x4MaxU) \
- V(I16x8Add) \
- V(I16x8Sub) \
- V(I16x8Mul) \
- V(I16x8Eq) \
- V(I16x8Ne) \
- V(I16x8GtS) \
- V(I16x8GeS) \
- V(I16x8GtU) \
- V(I16x8GeU) \
- V(I16x8MinS) \
- V(I16x8MinU) \
- V(I16x8MaxS) \
- V(I16x8MaxU) \
- V(I8x16Add) \
- V(I8x16Sub) \
- V(I8x16Eq) \
- V(I8x16Ne) \
- V(I8x16GtS) \
- V(I8x16GeS) \
- V(I8x16GtU) \
- V(I8x16GeU) \
- V(I8x16MinS) \
- V(I8x16MinU) \
- V(I8x16MaxS) \
- V(I8x16MaxU)
-
-#define PROTOTYPE_SIMD_BINOP(name) \
- void name(Simd128Register dst, Simd128Register src1, Simd128Register src2);
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, Simd128Register) \
+ V(F64x2Sub, Simd128Register) \
+ V(F64x2Mul, Simd128Register) \
+ V(F64x2Div, Simd128Register) \
+ V(F64x2Min, Simd128Register) \
+ V(F64x2Max, Simd128Register) \
+ V(F64x2Eq, Simd128Register) \
+ V(F64x2Ne, Simd128Register) \
+ V(F64x2Lt, Simd128Register) \
+ V(F64x2Le, Simd128Register) \
+ V(F32x4Add, Simd128Register) \
+ V(F32x4Sub, Simd128Register) \
+ V(F32x4Mul, Simd128Register) \
+ V(F32x4Div, Simd128Register) \
+ V(F32x4Min, Simd128Register) \
+ V(F32x4Max, Simd128Register) \
+ V(F32x4Eq, Simd128Register) \
+ V(F32x4Ne, Simd128Register) \
+ V(F32x4Lt, Simd128Register) \
+ V(F32x4Le, Simd128Register) \
+ V(I64x2Add, Simd128Register) \
+ V(I64x2Sub, Simd128Register) \
+ V(I64x2Mul, Simd128Register) \
+ V(I64x2Eq, Simd128Register) \
+ V(I64x2Ne, Simd128Register) \
+ V(I64x2GtS, Simd128Register) \
+ V(I64x2GeS, Simd128Register) \
+ V(I64x2Shl, Register) \
+ V(I64x2ShrS, Register) \
+ V(I64x2ShrU, Register) \
+ V(I64x2Shl, const Operand&) \
+ V(I64x2ShrS, const Operand&) \
+ V(I64x2ShrU, const Operand&) \
+ V(I32x4Add, Simd128Register) \
+ V(I32x4Sub, Simd128Register) \
+ V(I32x4Mul, Simd128Register) \
+ V(I32x4Eq, Simd128Register) \
+ V(I32x4Ne, Simd128Register) \
+ V(I32x4GtS, Simd128Register) \
+ V(I32x4GeS, Simd128Register) \
+ V(I32x4GtU, Simd128Register) \
+ V(I32x4GeU, Simd128Register) \
+ V(I32x4MinS, Simd128Register) \
+ V(I32x4MinU, Simd128Register) \
+ V(I32x4MaxS, Simd128Register) \
+ V(I32x4MaxU, Simd128Register) \
+ V(I32x4Shl, Register) \
+ V(I32x4ShrS, Register) \
+ V(I32x4ShrU, Register) \
+ V(I32x4Shl, const Operand&) \
+ V(I32x4ShrS, const Operand&) \
+ V(I32x4ShrU, const Operand&) \
+ V(I16x8Add, Simd128Register) \
+ V(I16x8Sub, Simd128Register) \
+ V(I16x8Mul, Simd128Register) \
+ V(I16x8Eq, Simd128Register) \
+ V(I16x8Ne, Simd128Register) \
+ V(I16x8GtS, Simd128Register) \
+ V(I16x8GeS, Simd128Register) \
+ V(I16x8GtU, Simd128Register) \
+ V(I16x8GeU, Simd128Register) \
+ V(I16x8MinS, Simd128Register) \
+ V(I16x8MinU, Simd128Register) \
+ V(I16x8MaxS, Simd128Register) \
+ V(I16x8MaxU, Simd128Register) \
+ V(I16x8Shl, Register) \
+ V(I16x8ShrS, Register) \
+ V(I16x8ShrU, Register) \
+ V(I16x8Shl, const Operand&) \
+ V(I16x8ShrS, const Operand&) \
+ V(I16x8ShrU, const Operand&) \
+ V(I8x16Add, Simd128Register) \
+ V(I8x16Sub, Simd128Register) \
+ V(I8x16Eq, Simd128Register) \
+ V(I8x16Ne, Simd128Register) \
+ V(I8x16GtS, Simd128Register) \
+ V(I8x16GeS, Simd128Register) \
+ V(I8x16GtU, Simd128Register) \
+ V(I8x16GeU, Simd128Register) \
+ V(I8x16MinS, Simd128Register) \
+ V(I8x16MinU, Simd128Register) \
+ V(I8x16MaxS, Simd128Register) \
+ V(I8x16MaxU, Simd128Register) \
+ V(I8x16Shl, Register) \
+ V(I8x16ShrS, Register) \
+ V(I8x16ShrU, Register) \
+ V(I8x16Shl, const Operand&) \
+ V(I8x16ShrS, const Operand&) \
+ V(I8x16ShrU, const Operand&)
+
+#define PROTOTYPE_SIMD_BINOP(name, stype) \
+ void name(Simd128Register dst, Simd128Register src1, stype src2);
SIMD_BINOP_LIST(PROTOTYPE_SIMD_BINOP)
#undef PROTOTYPE_SIMD_BINOP
#undef SIMD_BINOP_LIST
diff --git a/deps/v8/src/codegen/s390/register-s390.h b/deps/v8/src/codegen/s390/register-s390.h
index 48accf08c5..6e3b6a3e2b 100644
--- a/deps/v8/src/codegen/s390/register-s390.h
+++ b/deps/v8/src/codegen/s390/register-s390.h
@@ -253,7 +253,6 @@ constexpr Register kReturnRegister2 = r4;
constexpr Register kJSFunctionRegister = r3;
constexpr Register kContextRegister = r13;
constexpr Register kAllocateSizeRegister = r3;
-constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = r2;
constexpr Register kInterpreterBytecodeOffsetRegister = r6;
constexpr Register kInterpreterBytecodeArrayRegister = r7;
diff --git a/deps/v8/src/codegen/script-details.h b/deps/v8/src/codegen/script-details.h
index a0a364c6b5..e342e132d7 100644
--- a/deps/v8/src/codegen/script-details.h
+++ b/deps/v8/src/codegen/script-details.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_SCRIPT_DETAILS_H_
#define V8_CODEGEN_SCRIPT_DETAILS_H_
+#include "include/v8-script.h"
#include "src/common/globals.h"
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
index edd1a977e6..dc39be5b84 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
@@ -6,6 +6,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/cpu-features.h"
+#include "src/codegen/register-arch.h"
#if V8_TARGET_ARCH_IA32
#include "src/codegen/ia32/register-ia32.h"
@@ -15,9 +16,28 @@
#error Unsupported target architecture.
#endif
+// On IA32, an Operand can be a wrapper around a single register, in which
+// case callers should use the I8x16Splat overload whose |src| is a Register.
+#if V8_TARGET_ARCH_IA32
+#define DCHECK_OPERAND_IS_NOT_REG(op) DCHECK(!op.is_reg_only());
+#else
+#define DCHECK_OPERAND_IS_NOT_REG(op)
+#endif
+
namespace v8 {
namespace internal {
+void SharedTurboAssembler::Move(Register dst, uint32_t src) {
+ // Helper to paper over the different assembler function names.
+#if V8_TARGET_ARCH_IA32
+ mov(dst, Immediate(src));
+#elif V8_TARGET_ARCH_X64
+ movl(dst, Immediate(src));
+#else
+#error Unsupported target architecture.
+#endif
+}
+
void SharedTurboAssembler::Move(Register dst, Register src) {
// Helper to paper over the different assembler function names.
if (dst != src) {
@@ -31,6 +51,17 @@ void SharedTurboAssembler::Move(Register dst, Register src) {
}
}
+void SharedTurboAssembler::Add(Register dst, Immediate src) {
+ // Helper to paper over the different assembler function names.
+#if V8_TARGET_ARCH_IA32
+ add(dst, src);
+#elif V8_TARGET_ARCH_X64
+ addq(dst, src);
+#else
+#error Unsupported target architecture.
+#endif
+}
+
void SharedTurboAssembler::And(Register dst, Immediate src) {
// Helper to paper over the different assembler function names.
#if V8_TARGET_ARCH_IA32
@@ -42,17 +73,6 @@ void SharedTurboAssembler::And(Register dst, Immediate src) {
#endif
}
-void SharedTurboAssembler::Movapd(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovapd(dst, src);
- } else {
- // On SSE, movaps is 1 byte shorter than movapd, and has the same
- // behavior.
- movaps(dst, src);
- }
-}
-
void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1,
XMMRegister src2, uint8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -89,7 +109,7 @@ void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
if (lane == 0) {
- vpblendw(dst, src, rep, 0b00001111);
+ vmovsd(dst, src, rep);
} else {
vmovlhps(dst, src, rep);
}
@@ -100,7 +120,7 @@ void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
movaps(dst, src);
}
if (lane == 0) {
- pblendw(dst, rep, 0b00001111);
+ movsd(dst, rep);
} else {
movlhps(dst, rep);
}
@@ -231,6 +251,187 @@ void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
}
}
+template <typename Op>
+void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src,
+ XMMRegister scratch) {
+ DCHECK(!CpuFeatures::IsSupported(AVX2));
+ CpuFeatureScope ssse3_scope(this, SSSE3);
+ Movd(dst, src);
+ Xorps(scratch, scratch);
+ Pshufb(dst, scratch);
+}
+
+void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src,
+ XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ Movd(scratch, src);
+ vpbroadcastb(dst, scratch);
+ } else {
+ I8x16SplatPreAvx2(dst, src, scratch);
+ }
+}
+
+void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src,
+ XMMRegister scratch) {
+ DCHECK_OPERAND_IS_NOT_REG(src);
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ vpbroadcastb(dst, src);
+ } else {
+ I8x16SplatPreAvx2(dst, src, scratch);
+ }
+}
+
+void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
+ uint8_t src2, Register tmp1,
+ XMMRegister tmp2) {
+ DCHECK_NE(dst, tmp2);
+ // Perform 16-bit shift, then mask away low bits.
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
+ movaps(dst, src1);
+ src1 = dst;
+ }
+
+ uint8_t shift = truncate_to_int3(src2);
+ Psllw(dst, src1, byte{shift});
+
+ uint8_t bmask = static_cast<uint8_t>(0xff << shift);
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
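+  // E.g. for shift == 3, bmask is 0xF8 and mask is 0xF8F8F8F8. The 16-bit
+  // Psllw above lets each word's low byte spill into its high byte; the
+  // splatted mask below clears those contaminated low bits in every byte.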
+ Move(tmp1, mask);
+ Movd(tmp2, tmp1);
+ Pshufd(tmp2, tmp2, uint8_t{0});
+ Pand(dst, tmp2);
+}
+
+void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
+ Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3) {
+ DCHECK(!AreAliased(dst, tmp2, tmp3));
+ DCHECK(!AreAliased(src1, tmp2, tmp3));
+
+ // Take shift value modulo 8.
+ Move(tmp1, src2);
+ And(tmp1, Immediate(7));
+ Add(tmp1, Immediate(8));
+ // Create a mask to unset high bits.
+ Movd(tmp3, tmp1);
+ Pcmpeqd(tmp2, tmp2);
+ Psrlw(tmp2, tmp2, tmp3);
+ Packuswb(tmp2, tmp2);
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
+ movaps(dst, src1);
+ src1 = dst;
+ }
+ // Mask off the unwanted bits before word-shifting.
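+  // With the top bits of every byte cleared first, the 16-bit Psllw below
+  // cannot spill bits from a word's low byte into its high byte.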
+ Pand(dst, src1, tmp2);
+ Add(tmp1, Immediate(-8));
+ Movd(tmp3, tmp1);
+ Psllw(dst, dst, tmp3);
+}
+
+void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
+ uint8_t src2, XMMRegister tmp) {
+ // Unpack bytes into words, do word (16-bit) shifts, and repack.
+ DCHECK_NE(dst, tmp);
+ uint8_t shift = truncate_to_int3(src2) + 8;
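+  // The + 8 accounts for the unpacks below placing each source byte in the
+  // high half of its word: shifting right by shift + 8 applies the arithmetic
+  // shift and discards the junk low byte, so Packsswb can repack without
+  // saturating.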
+
+ Punpckhbw(tmp, src1);
+ Punpcklbw(dst, src1);
+ Psraw(tmp, shift);
+ Psraw(dst, shift);
+ Packsswb(dst, tmp);
+}
+
+void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
+ Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3) {
+ DCHECK(!AreAliased(dst, tmp2, tmp3));
+ DCHECK_NE(src1, tmp2);
+
+ // Unpack the bytes into words, do arithmetic shifts, and repack.
+ Punpckhbw(tmp2, src1);
+ Punpcklbw(dst, src1);
+ // Prepare shift value
+ Move(tmp1, src2);
+ // Take shift value modulo 8.
+ And(tmp1, Immediate(7));
+ Add(tmp1, Immediate(8));
+ Movd(tmp3, tmp1);
+ Psraw(tmp2, tmp3);
+ Psraw(dst, tmp3);
+ Packsswb(dst, tmp2);
+}
+
+void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
+ uint8_t src2, Register tmp1,
+ XMMRegister tmp2) {
+ DCHECK_NE(dst, tmp2);
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
+ movaps(dst, src1);
+ src1 = dst;
+ }
+
+ // Perform 16-bit shift, then mask away high bits.
+ uint8_t shift = truncate_to_int3(src2);
+ Psrlw(dst, src1, shift);
+
+ uint8_t bmask = 0xff >> shift;
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
+ Move(tmp1, mask);
+ Movd(tmp2, tmp1);
+ Pshufd(tmp2, tmp2, byte{0});
+ Pand(dst, tmp2);
+}
+
+void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
+ Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3) {
+ DCHECK(!AreAliased(dst, tmp2, tmp3));
+ DCHECK_NE(src1, tmp2);
+
+ // Unpack the bytes into words, do logical shifts, and repack.
+ Punpckhbw(tmp2, src1);
+ Punpcklbw(dst, src1);
+ // Prepare shift value.
+ Move(tmp1, src2);
+ // Take shift value modulo 8.
+ And(tmp1, Immediate(7));
+ Add(tmp1, Immediate(8));
+ Movd(tmp3, tmp1);
+ Psrlw(tmp2, tmp3);
+ Psrlw(dst, tmp3);
+ Packuswb(dst, tmp2);
+}
+
+template <typename Op>
+void SharedTurboAssembler::I16x8SplatPreAvx2(XMMRegister dst, Op src) {
+ DCHECK(!CpuFeatures::IsSupported(AVX2));
+ Movd(dst, src);
+ Pshuflw(dst, dst, uint8_t{0x0});
+ Punpcklqdq(dst, dst);
+}
+
+void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) {
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ Movd(dst, src);
+ vpbroadcastw(dst, dst);
+ } else {
+ I16x8SplatPreAvx2(dst, src);
+ }
+}
+
+void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) {
+ DCHECK_OPERAND_IS_NOT_REG(src);
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ vpbroadcastw(dst, src);
+ } else {
+ I16x8SplatPreAvx2(dst, src);
+ }
+}
+
void SharedTurboAssembler::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch,
bool is_signed) {
@@ -358,6 +559,65 @@ void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst,
}
}
+void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2,
+ XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
+ // k = i16x8.splat(0x8000)
+ Pcmpeqd(scratch, scratch);
+ Psllw(scratch, scratch, byte{15});
+
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
+ movaps(dst, src1);
+ src1 = dst;
+ }
+
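+  // pmulhrsw produces 0x8000 only for the -1.0 * -1.0 lane, whose saturated
+  // Q15 result should be 0x7fff; the compare against the 0x8000 splat marks
+  // exactly those lanes and the xor flips them to 0x7fff.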
+ Pmulhrsw(dst, src1, src2);
+ Pcmpeqw(scratch, dst);
+ Pxor(dst, scratch);
+}
+
+void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
+ XMMRegister src,
+ XMMRegister tmp) {
+ ASM_CODE_COMMENT(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // src = |a|b|c|d|e|f|g|h| (low)
+ // scratch = |0|a|0|c|0|e|0|g|
+ vpsrld(tmp, src, 16);
+ // dst = |0|b|0|d|0|f|0|h|
+ vpblendw(dst, src, tmp, 0xAA);
+ // dst = |a+b|c+d|e+f|g+h|
+ vpaddd(dst, tmp, dst);
+ } else if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ // There is a potentially better lowering if we get rip-relative
+ // constants, see https://github.com/WebAssembly/simd/pull/380.
+ movaps(tmp, src);
+ psrld(tmp, 16);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ pblendw(dst, tmp, 0xAA);
+ paddd(dst, tmp);
+ } else {
+ // src = |a|b|c|d|e|f|g|h|
+ // tmp = i32x4.splat(0x0000FFFF)
+ pcmpeqd(tmp, tmp);
+ psrld(tmp, byte{16});
+ // tmp =|0|b|0|d|0|f|0|h|
+ andps(tmp, src);
+ // dst = |0|a|0|c|0|e|0|g|
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ psrld(dst, byte{16});
+ // dst = |a+b|c+d|e+f|g+h|
+ paddd(dst, tmp);
+ }
+}
+
// 1. Multiply low word into scratch.
// 2. Multiply high word (can be signed or unsigned) into dst.
// 3. Unpack and interleave scratch and dst into dst.
@@ -539,7 +799,7 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Psllq(xmm_tmp, byte{63});
if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
- Movapd(dst, src);
+ movaps(dst, src);
src = dst;
}
// Add a bias of 2^63 to convert signed to unsigned.
@@ -572,7 +832,7 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Movd(xmm_shift, tmp_shift);
if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
- Movapd(dst, src);
+ movaps(dst, src);
src = dst;
}
Pxor(dst, src, xmm_tmp);
@@ -640,11 +900,16 @@ void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
vpxor(scratch, scratch, scratch);
vpunpckhdq(dst, src, scratch);
} else {
- if (dst != src) {
- movaps(dst, src);
+ if (dst == src) {
+ // xorps can be executed on more ports than pshufd.
+ xorps(scratch, scratch);
+ punpckhdq(dst, scratch);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ // No dependency on dst.
+ pshufd(dst, src, 0xEE);
+ pmovzxdq(dst, dst);
}
- xorps(scratch, scratch);
- punpckhdq(dst, scratch);
}
}
@@ -679,5 +944,74 @@ void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
}
}
+void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src,
+ XMMRegister scratch) {
+  // The trap handler uses the current pc to create a landing pad, so that it
+  // can determine whether a trap occurred in Wasm code due to an OOB load.
+  // Make sure the first instruction in each case below is the one that loads.
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ vpbroadcastb(dst, src);
+ } else if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // Avoid dependency on previous value of dst.
+ vpinsrb(dst, scratch, src, uint8_t{0});
+ vpxor(scratch, scratch, scratch);
+ vpshufb(dst, dst, scratch);
+ } else {
+ CpuFeatureScope ssse4_scope(this, SSE4_1);
+ CpuFeatureScope ssse3_scope(this, SSSE3);
+ pinsrb(dst, src, uint8_t{0});
+ xorps(scratch, scratch);
+ pshufb(dst, scratch);
+ }
+}
+
+void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src,
+ XMMRegister scratch) {
+  // The trap handler uses the current pc to create a landing pad, so that it
+  // can determine whether a trap occurred in Wasm code due to an OOB load.
+  // Make sure the first instruction in each case below is the one that loads.
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ vpbroadcastw(dst, src);
+ } else if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // Avoid dependency on previous value of dst.
+ vpinsrw(dst, scratch, src, uint8_t{0});
+ vpshuflw(dst, dst, uint8_t{0});
+ vpunpcklqdq(dst, dst, dst);
+ } else {
+ pinsrw(dst, src, uint8_t{0});
+ pshuflw(dst, dst, uint8_t{0});
+ movlhps(dst, dst);
+ }
+}
+
+void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) {
+  // The trap handler uses the current pc to create a landing pad, so that it
+  // can determine whether a trap occurred in Wasm code due to an OOB load.
+  // Make sure the first instruction in each case below is the one that loads.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vbroadcastss(dst, src);
+ } else {
+ movss(dst, src);
+ shufps(dst, dst, byte{0});
+ }
+}
+
+void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
+ uint8_t laneidx) {
+ if (laneidx == 0) {
+ Movlps(dst, src);
+ } else {
+ DCHECK_EQ(1, laneidx);
+ Movhps(dst, src);
+ }
+}
+
} // namespace internal
} // namespace v8
+
+#undef DCHECK_OPERAND_IS_NOT_REG
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
index 7c6f7185b9..c2d07392ac 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
@@ -29,28 +29,44 @@ constexpr int kStackSavedSavedFPSize = 2 * kDoubleSize;
constexpr int kStackSavedSavedFPSize = kDoubleSize;
#endif // V8_ENABLE_WEBASSEMBLY
+// Base class for SharedTurboAssemblerBase. This class contains macro-assembler
+// functions that can be shared across ia32 and x64 without any template
+// machinery, i.e. they do not require the CRTP pattern that
+// SharedTurboAssemblerBase exposes. This allows us to keep the bulk of the
+// definitions in a separate source file, rather than putting everything
+// inside this header.
class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
+ void Move(Register dst, uint32_t src);
// Move if registers are not identical.
void Move(Register dst, Register src);
+ void Add(Register dst, Immediate src);
void And(Register dst, Immediate src);
- void Movapd(XMMRegister dst, XMMRegister src);
-
- template <typename Dst, typename Src>
- void Movdqu(Dst dst, Src src) {
+  // Supports both SSE and AVX. On SSE, moves src to dst if they are not equal.
+ template <typename Op>
+ void Pshufb(XMMRegister dst, XMMRegister src, Op mask) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
- vmovdqu(dst, src);
+ vpshufb(dst, src, mask);
} else {
- // movups is 1 byte shorter than movdqu. On most SSE systems, this incurs
- // no delay moving between integer and floating-point domain.
- movups(dst, src);
+ // Make sure these are different so that we won't overwrite mask.
+ DCHECK_NE(mask, dst);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ CpuFeatureScope sse_scope(this, SSSE3);
+ pshufb(dst, mask);
}
}
+ template <typename Op>
+ void Pshufb(XMMRegister dst, Op mask) {
+ Pshufb(dst, dst, mask);
+ }
+
// Shufps that will mov src1 into dst if AVX is not supported.
void Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
uint8_t imm8);
@@ -128,6 +144,25 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
args...); \
}
+// Define a macro which uses |avx_name| when AVX is supported, and |sse_name|
+// when AVX is not supported. This is useful for bit-wise instructions like
+// andpd/andps, where the behavior is exactly the same, but the *ps
+// version is 1 byte shorter, and on SSE-only processors there is no
+// performance difference since those processors don't differentiate integer
+// and floating-point domains.
+// Note: we require |avx_name| to be the AVX instruction name without the "v"
+// prefix. If we took the full AVX instruction name and the caller
+// accidentally passed in an SSE instruction, it would compile without any
+// issues and silently generate the SSE instruction. By appending "v" here, we
+// ensure that we will generate an AVX instruction.
+#define AVX_OP_WITH_DIFF_SSE_INSTR(macro_name, avx_name, sse_name) \
+ template <typename Dst, typename Arg, typename... Args> \
+ void macro_name(Dst dst, Arg arg, Args... args) { \
+ AvxHelper<Dst, Arg, Args...>{this} \
+ .template emit<&Assembler::v##avx_name, &Assembler::sse_name>( \
+ dst, arg, args...); \
+ }
+
#define AVX_OP_SSE3(macro_name, name) \
template <typename Dst, typename Arg, typename... Args> \
void macro_name(Dst dst, Arg arg, Args... args) { \
@@ -163,15 +198,20 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
// Keep this list sorted by required extension, then instruction name.
AVX_OP(Addpd, addpd)
AVX_OP(Addps, addps)
+ AVX_OP(Addsd, addsd)
+ AVX_OP(Addss, addss)
AVX_OP(Andnpd, andnpd)
AVX_OP(Andnps, andnps)
AVX_OP(Andpd, andpd)
AVX_OP(Andps, andps)
AVX_OP(Cmpeqpd, cmpeqpd)
+ AVX_OP(Cmpeqps, cmpeqps)
AVX_OP(Cmplepd, cmplepd)
AVX_OP(Cmpleps, cmpleps)
AVX_OP(Cmpltpd, cmpltpd)
+ AVX_OP(Cmpltps, cmpltps)
AVX_OP(Cmpneqpd, cmpneqpd)
+ AVX_OP(Cmpneqps, cmpneqps)
AVX_OP(Cmpunordpd, cmpunordpd)
AVX_OP(Cmpunordps, cmpunordps)
AVX_OP(Cvtdq2pd, cvtdq2pd)
@@ -181,6 +221,8 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Cvttps2dq, cvttps2dq)
AVX_OP(Divpd, divpd)
AVX_OP(Divps, divps)
+ AVX_OP(Divsd, divsd)
+ AVX_OP(Divss, divss)
AVX_OP(Maxpd, maxpd)
AVX_OP(Maxps, maxps)
AVX_OP(Minpd, minpd)
@@ -198,6 +240,8 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Movups, movups)
AVX_OP(Mulpd, mulpd)
AVX_OP(Mulps, mulps)
+ AVX_OP(Mulsd, mulsd)
+ AVX_OP(Mulss, mulss)
AVX_OP(Orpd, orpd)
AVX_OP(Orps, orps)
AVX_OP(Packssdw, packssdw)
@@ -207,20 +251,26 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Paddd, paddd)
AVX_OP(Paddq, paddq)
AVX_OP(Paddsb, paddsb)
+ AVX_OP(Paddsw, paddsw)
AVX_OP(Paddusb, paddusb)
AVX_OP(Paddusw, paddusw)
AVX_OP(Paddw, paddw)
- AVX_OP(Pand, pand)
AVX_OP(Pavgb, pavgb)
AVX_OP(Pavgw, pavgw)
AVX_OP(Pcmpgtb, pcmpgtb)
+ AVX_OP(Pcmpgtd, pcmpgtd)
+ AVX_OP(Pcmpgtw, pcmpgtw)
AVX_OP(Pcmpeqd, pcmpeqd)
+ AVX_OP(Pcmpeqw, pcmpeqw)
+ AVX_OP(Pinsrw, pinsrw)
+ AVX_OP(Pmaddwd, pmaddwd)
+ AVX_OP(Pmaxsw, pmaxsw)
AVX_OP(Pmaxub, pmaxub)
+ AVX_OP(Pminsw, pminsw)
AVX_OP(Pminub, pminub)
AVX_OP(Pmovmskb, pmovmskb)
AVX_OP(Pmullw, pmullw)
AVX_OP(Pmuludq, pmuludq)
- AVX_OP(Por, por)
AVX_OP(Pshufd, pshufd)
AVX_OP(Pshufhw, pshufhw)
AVX_OP(Pshuflw, pshuflw)
@@ -236,7 +286,9 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Psubd, psubd)
AVX_OP(Psubq, psubq)
AVX_OP(Psubsb, psubsb)
+ AVX_OP(Psubsw, psubsw)
AVX_OP(Psubusb, psubusb)
+ AVX_OP(Psubusw, psubusw)
AVX_OP(Psubw, psubw)
AVX_OP(Punpckhbw, punpckhbw)
AVX_OP(Punpckhdq, punpckhdq)
@@ -246,7 +298,6 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Punpckldq, punpckldq)
AVX_OP(Punpcklqdq, punpcklqdq)
AVX_OP(Punpcklwd, punpcklwd)
- AVX_OP(Pxor, pxor)
AVX_OP(Rcpps, rcpps)
AVX_OP(Rsqrtps, rsqrtps)
AVX_OP(Sqrtpd, sqrtpd)
@@ -255,10 +306,18 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Sqrtss, sqrtss)
AVX_OP(Subpd, subpd)
AVX_OP(Subps, subps)
+ AVX_OP(Subsd, subsd)
+ AVX_OP(Subss, subss)
AVX_OP(Unpcklps, unpcklps)
AVX_OP(Xorpd, xorpd)
AVX_OP(Xorps, xorps)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Movapd, movapd, movaps)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Movdqu, movdqu, movups)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Pand, pand, andps)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Por, por, orps)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Pxor, pxor, xorps)
+
AVX_OP_SSE3(Haddps, haddps)
AVX_OP_SSE3(Movddup, movddup)
AVX_OP_SSE3(Movshdup, movshdup)
@@ -267,23 +326,32 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP_SSSE3(Pabsd, pabsd)
AVX_OP_SSSE3(Pabsw, pabsw)
AVX_OP_SSSE3(Palignr, palignr)
+ AVX_OP_SSSE3(Pmulhrsw, pmulhrsw)
AVX_OP_SSSE3(Psignb, psignb)
AVX_OP_SSSE3(Psignd, psignd)
AVX_OP_SSSE3(Psignw, psignw)
AVX_OP_SSE4_1(Extractps, extractps)
+ AVX_OP_SSE4_1(Packusdw, packusdw)
AVX_OP_SSE4_1(Pblendw, pblendw)
AVX_OP_SSE4_1(Pextrb, pextrb)
AVX_OP_SSE4_1(Pextrw, pextrw)
+ AVX_OP_SSE4_1(Pinsrb, pinsrb)
AVX_OP_SSE4_1(Pmaxsb, pmaxsb)
AVX_OP_SSE4_1(Pmaxsd, pmaxsd)
+ AVX_OP_SSE4_1(Pmaxud, pmaxud)
+ AVX_OP_SSE4_1(Pmaxuw, pmaxuw)
AVX_OP_SSE4_1(Pminsb, pminsb)
+ AVX_OP_SSE4_1(Pminsd, pminsd)
+ AVX_OP_SSE4_1(Pminud, pminud)
+ AVX_OP_SSE4_1(Pminuw, pminuw)
AVX_OP_SSE4_1(Pmovsxbw, pmovsxbw)
AVX_OP_SSE4_1(Pmovsxdq, pmovsxdq)
AVX_OP_SSE4_1(Pmovsxwd, pmovsxwd)
AVX_OP_SSE4_1(Pmovzxbw, pmovzxbw)
AVX_OP_SSE4_1(Pmovzxdq, pmovzxdq)
AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
+ AVX_OP_SSE4_1(Pmulld, pmulld)
AVX_OP_SSE4_1(Ptest, ptest)
AVX_OP_SSE4_1(Roundpd, roundpd)
AVX_OP_SSE4_1(Roundps, roundps)
@@ -298,6 +366,22 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void F32x4Splat(XMMRegister dst, DoubleRegister src);
void F32x4ExtractLane(FloatRegister dst, XMMRegister src, uint8_t lane);
void S128Store32Lane(Operand dst, XMMRegister src, uint8_t laneidx);
+ void I8x16Splat(XMMRegister dst, Register src, XMMRegister scratch);
+ void I8x16Splat(XMMRegister dst, Operand src, XMMRegister scratch);
+ void I8x16Shl(XMMRegister dst, XMMRegister src1, uint8_t src2, Register tmp1,
+ XMMRegister tmp2);
+ void I8x16Shl(XMMRegister dst, XMMRegister src1, Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3);
+ void I8x16ShrS(XMMRegister dst, XMMRegister src1, uint8_t src2,
+ XMMRegister tmp);
+ void I8x16ShrS(XMMRegister dst, XMMRegister src1, Register src2,
+ Register tmp1, XMMRegister tmp2, XMMRegister tmp3);
+ void I8x16ShrU(XMMRegister dst, XMMRegister src1, uint8_t src2, Register tmp1,
+ XMMRegister tmp2);
+ void I8x16ShrU(XMMRegister dst, XMMRegister src1, Register src2,
+ Register tmp1, XMMRegister tmp2, XMMRegister tmp3);
+ void I16x8Splat(XMMRegister dst, Register src);
+ void I16x8Splat(XMMRegister dst, Operand src);
void I16x8ExtMulLow(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister scrat, bool is_signed);
void I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1, XMMRegister src2,
@@ -307,6 +391,11 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void I16x8SConvertI8x16High(XMMRegister dst, XMMRegister src);
void I16x8UConvertI8x16High(XMMRegister dst, XMMRegister src,
XMMRegister scratch);
+ // Will move src1 to dst if AVX is not supported.
+ void I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister scratch);
+ void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
+ XMMRegister tmp);
// Requires that dst == src1 if AVX is not supported.
void I32x4ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister scratch, bool low, bool is_signed);
@@ -333,7 +422,338 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
// Requires dst == mask when AVX is not supported.
void S128Select(XMMRegister dst, XMMRegister mask, XMMRegister src1,
XMMRegister src2, XMMRegister scratch);
+ void S128Load8Splat(XMMRegister dst, Operand src, XMMRegister scratch);
+ void S128Load16Splat(XMMRegister dst, Operand src, XMMRegister scratch);
+ void S128Load32Splat(XMMRegister dst, Operand src);
+ void S128Store64Lane(Operand dst, XMMRegister src, uint8_t laneidx);
+
+ private:
+ template <typename Op>
+ void I8x16SplatPreAvx2(XMMRegister dst, Op src, XMMRegister scratch);
+ template <typename Op>
+ void I16x8SplatPreAvx2(XMMRegister dst, Op src);
+};
+
+// Common base class template shared by ia32 and x64 TurboAssembler. This uses
+// the Curiously Recurring Template Pattern (CRTP), where Impl is the actual
+// class (a subclass of SharedTurboAssemblerBase instantiated with the actual
+// class). This allows static polymorphism, where member functions can be
+// moved into SharedTurboAssembler, and we can also call into member functions
+// defined in the ia32- or x64-specific TurboAssembler from within this
+// template class, via Impl.
+//
+// Note: all member functions must be defined in this header file so that the
+// compiler can generate code for the function definitions. See
+// https://isocpp.org/wiki/faq/templates#templates-defn-vs-decl for rationale.
+// If a function does not need polymorphism, move it into SharedTurboAssembler,
+// and define it outside of this header.
+template <typename Impl>
+class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
+ using SharedTurboAssembler::SharedTurboAssembler;
+
+ public:
+ void F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src,
+ Register scratch) {
+ ASM_CODE_COMMENT(this);
+    // dst = [ src_low, 0x43300000, src_high, 0x43300000 ];
+    // 0x43300000'00000000 is a special double where the significand bits
+    // precisely represent all uint32 numbers.
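+    // In other words, each lane then holds the double 2^52 + src_lane, so
+    // subtracting 2^52 below recovers the exact uint32 value as a double.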
+ if (!CpuFeatures::IsSupported(AVX) && dst != src) {
+ movaps(dst, src);
+ src = dst;
+ }
+ Unpcklps(dst, src,
+ ExternalReferenceAsOperand(
+ ExternalReference::
+ address_of_wasm_f64x2_convert_low_i32x4_u_int_mask(),
+ scratch));
+ Subpd(dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52(), scratch));
+ }
+
+ void I32x4SConvertF32x4(XMMRegister dst, XMMRegister src, XMMRegister tmp,
+ Register scratch) {
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_int32_overflow_as_float(), scratch);
+
+ // This algorithm works by:
+    // 1. lanes with NaNs are zeroed
+    // 2. lanes >= 2147483648.0f (MAX_INT32+1) are set to 0xffff'ffff
+ // 3. cvttps2dq sets all out of range lanes to 0x8000'0000
+ // a. correct for underflows (< MIN_INT32)
+ // b. wrong for overflow, and we know which lanes overflow from 2.
+ // 4. adjust for 3b by xor-ing 2 and 3
+ // a. 0x8000'0000 xor 0xffff'ffff = 0x7fff'ffff (MAX_INT32)
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcmpeqps(tmp, src, src);
+ vandps(dst, src, tmp);
+ vcmpgeps(tmp, src, op);
+ vcvttps2dq(dst, dst);
+ vpxor(dst, dst, tmp);
+ } else {
+ if (src == dst) {
+ movaps(tmp, src);
+ cmpeqps(tmp, tmp);
+ andps(dst, tmp);
+ movaps(tmp, op);
+ cmpleps(tmp, dst);
+ cvttps2dq(dst, dst);
+ xorps(dst, tmp);
+ } else {
+ movaps(tmp, op);
+ cmpleps(tmp, src);
+ cvttps2dq(dst, src);
+ xorps(dst, tmp);
+ movaps(tmp, src);
+ cmpeqps(tmp, tmp);
+ andps(dst, tmp);
+ }
+ }
+ }
+
+ void I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch, Register tmp) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ XMMRegister original_dst = dst;
+ // Make sure we don't overwrite src.
+ if (dst == src) {
+ DCHECK_NE(src, scratch);
+ dst = scratch;
+ }
+ // dst = 0 if src == NaN, else all ones.
+ vcmpeqpd(dst, src, src);
+ // dst = 0 if src == NaN, else INT32_MAX as double.
+ vandpd(
+ dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
+ // dst = 0 if src == NaN, src is saturated to INT32_MAX as double.
+ vminpd(dst, src, dst);
+ // Values > INT32_MAX already saturated, values < INT32_MIN raises an
+ // exception, which is masked and returns 0x80000000.
+ vcvttpd2dq(original_dst, dst);
+ } else {
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ movaps(scratch, dst);
+ cmpeqpd(scratch, dst);
+ andps(scratch,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
+ minpd(dst, scratch);
+ cvttpd2dq(dst, dst);
+ }
+ }
+
+ void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch, Register tmp) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vxorpd(scratch, scratch, scratch);
+ // Saturate to 0.
+ vmaxpd(dst, src, scratch);
+ // Saturate to UINT32_MAX.
+ vminpd(
+ dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_uint32_max_as_double(), tmp));
+ // Truncate.
+ vroundpd(dst, dst, kRoundToZero);
+      // Add 2^52 so that the low significand bits hold the uint32 value.
+ vaddpd(dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52(), tmp));
+ // Extract low 32 bits of each double's significand, zero top lanes.
+ // dst = [dst[0], dst[2], 0, 0]
+ vshufps(dst, dst, scratch, 0x88);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ xorps(scratch, scratch);
+ maxpd(dst, scratch);
+ minpd(dst, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_uint32_max_as_double(),
+ tmp));
+ roundpd(dst, dst, kRoundToZero);
+ addpd(dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52(), tmp));
+ shufps(dst, scratch, 0x88);
+ }
+ }
+
+ void I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
+ Register scratch) {
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i16x8_splat_0x0001(), scratch);
+ // pmaddwd multiplies signed words in src and op, producing
+ // signed doublewords, then adds pairwise.
+ // src = |a|b|c|d|e|f|g|h|
+ // dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
+ movaps(dst, src);
+ src = dst;
+ }
+
+ Pmaddwd(dst, src, op);
+ }
+
+ void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch, Register tmp) {
+ ASM_CODE_COMMENT(this);
+ // pmaddubsw treats the first operand as unsigned, so pass the external
+ // reference to it as the first operand.
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x01(), tmp);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovdqa(scratch, op);
+ vpmaddubsw(dst, scratch, src);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ if (dst == src) {
+ movaps(scratch, op);
+ pmaddubsw(scratch, src);
+ movaps(dst, scratch);
+ } else {
+ movaps(dst, op);
+ pmaddubsw(dst, src);
+ }
+ }
+ }
+
+ void I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src,
+ Register scratch) {
+ ASM_CODE_COMMENT(this);
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x01(), scratch);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpmaddubsw(dst, src, op);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ pmaddubsw(dst, op);
+ }
+ }
+
+ void I8x16Swizzle(XMMRegister dst, XMMRegister src, XMMRegister mask,
+ XMMRegister scratch, Register tmp, bool omit_add = false) {
+ ASM_CODE_COMMENT(this);
+ if (omit_add) {
+ // We have determined that the indices are immediates, and they are either
+ // within bounds, or the top bit is set, so we can omit the add.
+ Pshufb(dst, src, mask);
+ return;
+ }
+
+    // Out-of-range indices should return 0. Adding 112 makes any index > 15
+    // at least 128 (top bit set, saturating at 255), so pshufb will zero that
+    // lane.
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_swizzle_mask(), tmp);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpaddusb(scratch, mask, op);
+ vpshufb(dst, src, scratch);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ movaps(scratch, op);
+ if (dst != src) {
+ DCHECK_NE(dst, mask);
+ movaps(dst, src);
+ }
+ paddusb(scratch, mask);
+ pshufb(dst, scratch);
+ }
+ }
+
+ void I8x16Popcnt(XMMRegister dst, XMMRegister src, XMMRegister tmp1,
+ XMMRegister tmp2, Register scratch) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_NE(dst, tmp1);
+ DCHECK_NE(src, tmp1);
+ DCHECK_NE(dst, tmp2);
+ DCHECK_NE(src, tmp2);
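+    // The pshufb-based paths below use the nibble-table popcount: split each
+    // byte into its low and high nibble, look both up in the 16-entry
+    // popcnt table, and add the two partial counts per byte.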
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovdqa(tmp1, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f(),
+ scratch));
+ vpandn(tmp2, tmp1, src);
+ vpand(dst, tmp1, src);
+ vmovdqa(tmp1, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_popcnt_mask(),
+ scratch));
+ vpsrlw(tmp2, tmp2, 4);
+ vpshufb(dst, tmp1, dst);
+ vpshufb(tmp2, tmp1, tmp2);
+ vpaddb(dst, dst, tmp2);
+ } else if (CpuFeatures::IsSupported(ATOM)) {
+      // Pre-Goldmont low-power Intel microarchitectures have a very slow
+      // PSHUFB instruction, so use a PSHUFB-free divide-and-conquer
+      // algorithm on these processors. The ATOM CPU feature captures exactly
+      // the right set of processors.
+ movaps(tmp1, src);
+ psrlw(tmp1, 1);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ andps(tmp1, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x55(),
+ scratch));
+ psubb(dst, tmp1);
+ Operand splat_0x33 = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x33(), scratch);
+ movaps(tmp1, dst);
+ andps(dst, splat_0x33);
+ psrlw(tmp1, 2);
+ andps(tmp1, splat_0x33);
+ paddb(dst, tmp1);
+ movaps(tmp1, dst);
+ psrlw(dst, 4);
+ paddb(dst, tmp1);
+ andps(dst, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f(),
+ scratch));
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ movaps(tmp1, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f(),
+ scratch));
+ Operand mask = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_popcnt_mask(), scratch);
+ if (tmp2 != tmp1) {
+ movaps(tmp2, tmp1);
+ }
+ andps(tmp1, src);
+ andnps(tmp2, src);
+ psrlw(tmp2, 4);
+ movaps(dst, mask);
+ pshufb(dst, tmp1);
+ movaps(tmp1, mask);
+ pshufb(tmp1, tmp2);
+ paddb(dst, tmp1);
+ }
+ }
+
+ private:
+ // All implementation-specific methods must be called through this.
+ Impl* impl() { return static_cast<Impl*>(this); }
+
+ Operand ExternalReferenceAsOperand(ExternalReference reference,
+ Register scratch) {
+ return impl()->ExternalReferenceAsOperand(reference, scratch);
+ }
};
+
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_SHARED_IA32_X64_MACRO_ASSEMBLER_SHARED_IA32_X64_H_
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index 4d30f01c08..628f8b6eda 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -42,8 +42,7 @@ void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
RecordRelocInfo(rmode);
uint32_t offset = static_cast<uint32_t>(entry - options().code_range_start);
if (IsOnHeap()) {
- saved_offsets_for_runtime_entries_.push_back(
- std::make_pair(pc_offset(), offset));
+ saved_offsets_for_runtime_entries_.emplace_back(pc_offset(), offset);
emitl(relative_target_offset(entry, reinterpret_cast<Address>(pc_)));
// We must ensure that `emitl` is not growing the assembler buffer
// and falling back to off-heap compilation.
@@ -66,8 +65,7 @@ void Assembler::emit(Immediate64 x) {
if (x.rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT && IsOnHeap()) {
int offset = pc_offset();
Handle<HeapObject> object(reinterpret_cast<Address*>(x.value_));
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, x.value_));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, x.value_);
emitq(static_cast<uint64_t>(object->ptr()));
DCHECK(EmbeddedObjectMatches(offset, object));
return;
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 1e66311d95..108f381ba7 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -3347,26 +3347,6 @@ void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
- DCHECK(!IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::cvtss2sd(XMMRegister dst, Operand src) {
- DCHECK(!IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3601,6 +3581,14 @@ void Assembler::vmovdqa(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
+void Assembler::vmovdqa(YMMRegister dst, YMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, xmm0, src, kL256, k66, k0F, kWIG);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::vmovdqu(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3625,6 +3613,14 @@ void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
+void Assembler::vmovdqu(YMMRegister dst, YMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(src, xmm0, dst, kL256, kF3, k0F, kWIG);
+ emit(0x7F);
+ emit_sse_operand(src, dst);
+}
+
void Assembler::vmovlps(XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3688,6 +3684,15 @@ void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
+void Assembler::vps(byte op, YMMRegister dst, YMMRegister src1,
+ YMMRegister src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL256, kNone, k0F, kWIG);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3696,6 +3701,14 @@ void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
emit_sse_operand(dst, src2);
}
+void Assembler::vps(byte op, YMMRegister dst, YMMRegister src1, Operand src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL256, kNone, k0F, kWIG);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2, byte imm8) {
DCHECK(IsEnabled(AVX));
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index c3d3af100b..cd93c7f856 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -235,6 +235,7 @@ class V8_EXPORT_PRIVATE Operand {
}
Operand(const Operand&) V8_NOEXCEPT = default;
+ Operand& operator=(const Operand&) V8_NOEXCEPT = default;
const Data& data() const { return data_; }
@@ -1241,9 +1242,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cvtqsi2sd(XMMRegister dst, Operand src);
void cvtqsi2sd(XMMRegister dst, Register src);
- void cvtss2sd(XMMRegister dst, XMMRegister src);
- void cvtss2sd(XMMRegister dst, Operand src);
-
void cvtsd2si(Register dst, XMMRegister src);
void cvtsd2siq(Register dst, XMMRegister src);
@@ -1256,14 +1254,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void pmovmskb(Register dst, XMMRegister src);
+ void pinsrw(XMMRegister dst, Register src, uint8_t imm8);
+ void pinsrw(XMMRegister dst, Operand src, uint8_t imm8);
+
// SSE 4.1 instruction
void insertps(XMMRegister dst, XMMRegister src, byte imm8);
void insertps(XMMRegister dst, Operand src, byte imm8);
void pextrq(Register dst, XMMRegister src, int8_t imm8);
void pinsrb(XMMRegister dst, Register src, uint8_t imm8);
void pinsrb(XMMRegister dst, Operand src, uint8_t imm8);
- void pinsrw(XMMRegister dst, Register src, uint8_t imm8);
- void pinsrw(XMMRegister dst, Operand src, uint8_t imm8);
void pinsrd(XMMRegister dst, Register src, uint8_t imm8);
void pinsrd(XMMRegister dst, Operand src, uint8_t imm8);
void pinsrq(XMMRegister dst, Register src, uint8_t imm8);
@@ -1351,9 +1350,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); }
void vmovdqa(XMMRegister dst, Operand src);
void vmovdqa(XMMRegister dst, XMMRegister src);
+ void vmovdqa(YMMRegister dst, YMMRegister src);
void vmovdqu(XMMRegister dst, Operand src);
void vmovdqu(Operand dst, XMMRegister src);
void vmovdqu(XMMRegister dst, XMMRegister src);
+ void vmovdqu(YMMRegister dst, YMMRegister src);
void vmovlps(XMMRegister dst, XMMRegister src1, Operand src2);
void vmovlps(Operand dst, XMMRegister src);
@@ -1367,6 +1368,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
} \
void v##instr(XMMRegister dst, Operand src2) { \
vps(0x##opcode, dst, xmm0, src2); \
+ } \
+ void v##instr(YMMRegister dst, YMMRegister src2) { \
+ vps(0x##opcode, dst, ymm0, src2); \
+ } \
+ void v##instr(YMMRegister dst, Operand src2) { \
+ vps(0x##opcode, dst, ymm0, src2); \
}
SSE_UNOP_INSTRUCTION_LIST(AVX_SSE_UNOP)
#undef AVX_SSE_UNOP
@@ -1377,6 +1384,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
} \
void v##instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
vps(0x##opcode, dst, src1, src2); \
+ } \
+ void v##instr(YMMRegister dst, YMMRegister src1, YMMRegister src2) { \
+ vps(0x##opcode, dst, src1, src2); \
+ } \
+ void v##instr(YMMRegister dst, YMMRegister src1, Operand src2) { \
+ vps(0x##opcode, dst, src1, src2); \
}
SSE_BINOP_INSTRUCTION_LIST(AVX_SSE_BINOP)
#undef AVX_SSE_BINOP
@@ -1422,12 +1435,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vcvtdq2pd(XMMRegister dst, XMMRegister src) {
vinstr(0xe6, dst, xmm0, src, kF3, k0F, kWIG);
}
- void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
- }
- void vcvtss2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
- }
void vcvttps2dq(XMMRegister dst, XMMRegister src) {
vinstr(0x5b, dst, xmm0, src, kF3, k0F, kWIG);
}
@@ -1590,6 +1597,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
AVX_CMP_P(vcmpneq, 0x4)
AVX_CMP_P(vcmpnlt, 0x5)
AVX_CMP_P(vcmpnle, 0x6)
+ AVX_CMP_P(vcmpge, 0xd)
#undef AVX_CMP_P
@@ -1693,7 +1701,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vps(byte op, YMMRegister dst, YMMRegister src1, YMMRegister src2);
void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
+ void vps(byte op, YMMRegister dst, YMMRegister src1, Operand src2);
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
byte imm8);
void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
diff --git a/deps/v8/src/codegen/x64/fma-instr.h b/deps/v8/src/codegen/x64/fma-instr.h
index f41c91ee51..c607429e33 100644
--- a/deps/v8/src/codegen/x64/fma-instr.h
+++ b/deps/v8/src/codegen/x64/fma-instr.h
@@ -30,9 +30,17 @@
V(vfnmsub132ss, LIG, 66, 0F, 38, W0, 9f) \
V(vfnmsub213ss, LIG, 66, 0F, 38, W0, af) \
V(vfnmsub231ss, LIG, 66, 0F, 38, W0, bf) \
+ V(vfmadd132ps, L128, 66, 0F, 38, W0, 98) \
+ V(vfmadd213ps, L128, 66, 0F, 38, W0, a8) \
V(vfmadd231ps, L128, 66, 0F, 38, W0, b8) \
+ V(vfnmadd132ps, L128, 66, 0F, 38, W0, 9c) \
+ V(vfnmadd213ps, L128, 66, 0F, 38, W0, ac) \
V(vfnmadd231ps, L128, 66, 0F, 38, W0, bc) \
+ V(vfmadd132pd, L128, 66, 0F, 38, W1, 98) \
+ V(vfmadd213pd, L128, 66, 0F, 38, W1, a8) \
V(vfmadd231pd, L128, 66, 0F, 38, W1, b8) \
+ V(vfnmadd132pd, L128, 66, 0F, 38, W1, 9c) \
+ V(vfnmadd213pd, L128, 66, 0F, 38, W1, ac) \
V(vfnmadd231pd, L128, 66, 0F, 38, W1, bc)
#endif // V8_CODEGEN_X64_FMA_INSTR_H_
diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
index 50ba12b836..fade1eda99 100644
--- a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
+++ b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
@@ -43,12 +43,12 @@ constexpr auto WriteBarrierDescriptor::registers() {
#ifdef V8_IS_TSAN
// static
-constexpr auto TSANRelaxedStoreDescriptor::registers() {
+constexpr auto TSANStoreDescriptor::registers() {
return RegisterArray(arg_reg_1, arg_reg_2, kReturnRegister0);
}
// static
-constexpr auto TSANRelaxedLoadDescriptor::registers() {
+constexpr auto TSANLoadDescriptor::registers() {
return RegisterArray(arg_reg_1, kReturnRegister0);
}
#endif // V8_IS_TSAN
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 5a8dc356b8..f4c498dc10 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -294,6 +294,17 @@ void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
}
}
+void TurboAssembler::AtomicStoreTaggedField(Operand dst_field_operand,
+ Register value) {
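+  // Note: xchg with a memory operand is implicitly locked on x64, which is
+  // what gives this store its sequentially consistent atomic semantics.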
+ if (COMPRESS_POINTERS_BOOL) {
+ movl(kScratchRegister, value);
+ xchgl(kScratchRegister, dst_field_operand);
+ } else {
+ movq(kScratchRegister, value);
+ xchgq(kScratchRegister, dst_field_operand);
+ }
+}
+
void TurboAssembler::DecompressTaggedSigned(Register destination,
Operand field_operand) {
ASM_CODE_COMMENT(this);
@@ -483,26 +494,27 @@ void TurboAssembler::CallRecordWriteStub(
}
#ifdef V8_IS_TSAN
-void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
- SaveFPRegsMode fp_mode, int size,
- StubCallMode mode) {
+void TurboAssembler::CallTSANStoreStub(Register address, Register value,
+ SaveFPRegsMode fp_mode, int size,
+ StubCallMode mode,
+ std::memory_order order) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(address, value));
- TSANRelaxedStoreDescriptor descriptor;
+ TSANStoreDescriptor descriptor;
RegList registers = descriptor.allocatable_registers();
MaybeSaveRegisters(registers);
Register address_parameter(
- descriptor.GetRegisterParameter(TSANRelaxedStoreDescriptor::kAddress));
+ descriptor.GetRegisterParameter(TSANStoreDescriptor::kAddress));
Register value_parameter(
- descriptor.GetRegisterParameter(TSANRelaxedStoreDescriptor::kValue));
+ descriptor.GetRegisterParameter(TSANStoreDescriptor::kValue));
- // Prepare argument registers for calling GetTSANRelaxedStoreStub.
+ // Prepare argument registers for calling GetTSANStoreStub.
MovePair(address_parameter, address, value_parameter, value);
if (isolate()) {
- Builtin builtin = CodeFactory::GetTSANRelaxedStoreStub(fp_mode, size);
+ Builtin builtin = CodeFactory::GetTSANStoreStub(fp_mode, size, order);
Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
@@ -520,7 +532,7 @@ void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
else {
DCHECK_EQ(mode, StubCallMode::kCallWasmRuntimeStub);
// Use {near_call} for direct Wasm call within a module.
- auto wasm_target = wasm::WasmCode::GetTSANRelaxedStoreStub(fp_mode, size);
+ auto wasm_target = wasm::WasmCode::GetTSANStoreStub(fp_mode, size, order);
near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -531,13 +543,13 @@ void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
void TurboAssembler::CallTSANRelaxedLoadStub(Register address,
SaveFPRegsMode fp_mode, int size,
StubCallMode mode) {
- TSANRelaxedLoadDescriptor descriptor;
+ TSANLoadDescriptor descriptor;
RegList registers = descriptor.allocatable_registers();
MaybeSaveRegisters(registers);
Register address_parameter(
- descriptor.GetRegisterParameter(TSANRelaxedLoadDescriptor::kAddress));
+ descriptor.GetRegisterParameter(TSANLoadDescriptor::kAddress));
// Prepare argument registers for calling TSANRelaxedLoad.
Move(address_parameter, address);
@@ -847,6 +859,99 @@ void TurboAssembler::Movq(Register dst, XMMRegister src) {
}
}
+// Helper macro to define the qfma macro-assembler operations. It handles
+// every possible case of register aliasing to minimize the number of
+// instructions.
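+// Computes dst = src1 + (src2 * src3).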
+#define QFMA(ps_or_pd) \
+ if (CpuFeatures::IsSupported(FMA3)) { \
+ CpuFeatureScope fma3_scope(this, FMA3); \
+ if (dst == src1) { \
+ vfmadd231##ps_or_pd(dst, src2, src3); \
+ } else if (dst == src2) { \
+ vfmadd132##ps_or_pd(dst, src1, src3); \
+ } else if (dst == src3) { \
+ vfmadd213##ps_or_pd(dst, src2, src1); \
+ } else { \
+ vmovups(dst, src1); \
+ vfmadd231##ps_or_pd(dst, src2, src3); \
+ } \
+ } else if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(this, AVX); \
+ vmul##ps_or_pd(tmp, src2, src3); \
+ vadd##ps_or_pd(dst, src1, tmp); \
+ } else { \
+ if (dst == src1) { \
+ movaps(tmp, src2); \
+ mul##ps_or_pd(tmp, src3); \
+ add##ps_or_pd(dst, tmp); \
+ } else if (dst == src2) { \
+ DCHECK_NE(src2, src1); \
+ mul##ps_or_pd(src2, src3); \
+ add##ps_or_pd(src2, src1); \
+ } else if (dst == src3) { \
+ DCHECK_NE(src3, src1); \
+ mul##ps_or_pd(src3, src2); \
+ add##ps_or_pd(src3, src1); \
+ } else { \
+ movaps(dst, src2); \
+ mul##ps_or_pd(dst, src3); \
+ add##ps_or_pd(dst, src1); \
+ } \
+ }
+
+// Helper macro to define the qfms macro-assembler operations. It handles
+// every possible case of register aliasing to minimize the number of
+// instructions.
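+// Computes dst = src1 - (src2 * src3).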
+#define QFMS(ps_or_pd) \
+ if (CpuFeatures::IsSupported(FMA3)) { \
+ CpuFeatureScope fma3_scope(this, FMA3); \
+ if (dst == src1) { \
+ vfnmadd231##ps_or_pd(dst, src2, src3); \
+ } else if (dst == src2) { \
+ vfnmadd132##ps_or_pd(dst, src1, src3); \
+ } else if (dst == src3) { \
+ vfnmadd213##ps_or_pd(dst, src2, src1); \
+ } else { \
+ vmovups(dst, src1); \
+ vfnmadd231##ps_or_pd(dst, src2, src3); \
+ } \
+ } else if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(this, AVX); \
+ vmul##ps_or_pd(tmp, src2, src3); \
+ vsub##ps_or_pd(dst, src1, tmp); \
+ } else { \
+ movaps(tmp, src2); \
+ mul##ps_or_pd(tmp, src3); \
+ if (dst != src1) { \
+ movaps(dst, src1); \
+ } \
+ sub##ps_or_pd(dst, tmp); \
+ }
+
+void TurboAssembler::F32x4Qfma(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMA(ps)
+}
+
+void TurboAssembler::F32x4Qfms(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMS(ps)
+}
+
+void TurboAssembler::F64x2Qfma(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMA(pd);
+}
+
+void TurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMS(pd);
+}
+
+#undef QFMA
+#undef QFMS
+
void TurboAssembler::Movdqa(XMMRegister dst, Operand src) {
// See comments in Movdqa(XMMRegister, XMMRegister).
if (CpuFeatures::IsSupported(AVX)) {
@@ -1551,16 +1656,6 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) {
// ----------------------------------------------------------------------------
-void MacroAssembler::Absps(XMMRegister dst) {
- Andps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_float_abs_constant()));
-}
-
-void MacroAssembler::Negps(XMMRegister dst) {
- Xorps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_float_neg_constant()));
-}
-
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
@@ -1993,100 +2088,6 @@ void TurboAssembler::JumpCodeTObject(Register code, JumpMode jump_mode) {
}
}
-void TurboAssembler::RetpolineCall(Register reg) {
- ASM_CODE_COMMENT(this);
- Label setup_return, setup_target, inner_indirect_branch, capture_spec;
-
- jmp(&setup_return); // Jump past the entire retpoline below.
-
- bind(&inner_indirect_branch);
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- movq(Operand(rsp, 0), reg);
- ret(0);
-
- bind(&setup_return);
- call(&inner_indirect_branch); // Callee will return after this instruction.
-}
-
-void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
- Move(kScratchRegister, destination, rmode);
- RetpolineCall(kScratchRegister);
-}
-
-void TurboAssembler::RetpolineJump(Register reg) {
- ASM_CODE_COMMENT(this);
- Label setup_target, capture_spec;
-
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- movq(Operand(rsp, 0), reg);
- ret(0);
-}
-
-void TurboAssembler::Pmaddwd(XMMRegister dst, XMMRegister src1, Operand src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddwd(dst, src1, src2);
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- pmaddwd(dst, src2);
- }
-}
-
-void TurboAssembler::Pmaddwd(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddwd(dst, src1, src2);
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- pmaddwd(dst, src2);
- }
-}
-
-void TurboAssembler::Pmaddubsw(XMMRegister dst, XMMRegister src1,
- Operand src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddubsw(dst, src1, src2);
- } else {
- CpuFeatureScope ssse3_scope(this, SSSE3);
- if (dst != src1) {
- movaps(dst, src1);
- }
- pmaddubsw(dst, src2);
- }
-}
-
-void TurboAssembler::Pmaddubsw(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddubsw(dst, src1, src2);
- } else {
- CpuFeatureScope ssse3_scope(this, SSSE3);
- if (dst != src1) {
- movaps(dst, src1);
- }
- pmaddubsw(dst, src2);
- }
-}
-
void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
@@ -2116,16 +2117,17 @@ using NoAvxFn = void (Assembler::*)(XMMRegister, Src, uint8_t);
template <typename Src>
void PinsrHelper(Assembler* assm, AvxFn<Src> avx, NoAvxFn<Src> noavx,
XMMRegister dst, XMMRegister src1, Src src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr,
base::Optional<CpuFeature> feature = base::nullopt) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(assm, AVX);
+ if (load_pc_offset) *load_pc_offset = assm->pc_offset();
(assm->*avx)(dst, src1, src2, imm8);
return;
}
- if (dst != src1) {
- assm->movaps(dst, src1);
- }
+ if (dst != src1) assm->movaps(dst, src1);
+ if (load_pc_offset) *load_pc_offset = assm->pc_offset();
if (feature.has_value()) {
DCHECK(CpuFeatures::IsSupported(*feature));
CpuFeatureScope scope(assm, *feature);
@@ -2137,40 +2139,41 @@ void PinsrHelper(Assembler* assm, AvxFn<Src> avx, NoAvxFn<Src> noavx,
} // namespace
void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
+ imm8, load_pc_offset, {SSE4_1});
}
void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
+ imm8, load_pc_offset, {SSE4_1});
}
void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
- imm8);
+ imm8, load_pc_offset);
}
void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
- imm8);
+ imm8, load_pc_offset);
}
void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
// Need a fall back when SSE4_1 is unavailable. Pinsrb and Pinsrq are used
// only by Wasm SIMD, which requires SSE4_1 already.
if (CpuFeatures::IsSupported(SSE4_1)) {
PinsrHelper(this, &Assembler::vpinsrd, &Assembler::pinsrd, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
+ imm8, load_pc_offset, {SSE4_1});
return;
}
Movd(kScratchDoubleReg, src2);
+ if (load_pc_offset) *load_pc_offset = pc_offset();
if (imm8 == 1) {
punpckldq(dst, kScratchDoubleReg);
} else {
@@ -2180,16 +2183,17 @@ void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Register src2,
}
void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
// Need a fall back when SSE4_1 is unavailable. Pinsrb and Pinsrq are used
// only by Wasm SIMD, which requires SSE4_1 already.
if (CpuFeatures::IsSupported(SSE4_1)) {
PinsrHelper(this, &Assembler::vpinsrd, &Assembler::pinsrd, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
+ imm8, load_pc_offset, {SSE4_1});
return;
}
Movd(kScratchDoubleReg, src2);
+ if (load_pc_offset) *load_pc_offset = pc_offset();
if (imm8 == 1) {
punpckldq(dst, kScratchDoubleReg);
} else {
@@ -2198,361 +2202,66 @@ void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
}
}
-void TurboAssembler::Pinsrd(XMMRegister dst, Register src2, uint8_t imm8) {
- Pinsrd(dst, dst, src2, imm8);
+void TurboAssembler::Pinsrd(XMMRegister dst, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset) {
+ Pinsrd(dst, dst, src2, imm8, load_pc_offset);
}
-void TurboAssembler::Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8) {
- Pinsrd(dst, dst, src2, imm8);
+void TurboAssembler::Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset) {
+ Pinsrd(dst, dst, src2, imm8, load_pc_offset);
}
void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
+ imm8, load_pc_offset, {SSE4_1});
}
void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
+ imm8, load_pc_offset, {SSE4_1});
}
-void TurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpblendvb(dst, src1, src2, mask);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- DCHECK_EQ(dst, src1);
- DCHECK_EQ(xmm0, mask);
- pblendvb(dst, src2);
- }
-}
-
-void TurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vblendvps(dst, src1, src2, mask);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- DCHECK_EQ(dst, src1);
- DCHECK_EQ(xmm0, mask);
- blendvps(dst, src2);
- }
-}
-
-void TurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vblendvpd(dst, src1, src2, mask);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- DCHECK_EQ(dst, src1);
- DCHECK_EQ(xmm0, mask);
- blendvpd(dst, src2);
- }
-}
-
-void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src,
- XMMRegister mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpshufb(dst, src, mask);
- } else {
- // Make sure these are different so that we won't overwrite mask.
- DCHECK_NE(dst, mask);
- if (dst != src) {
- movaps(dst, src);
- }
- CpuFeatureScope sse_scope(this, SSSE3);
- pshufb(dst, mask);
- }
-}
-
-void TurboAssembler::Pmulhrsw(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmulhrsw(dst, src1, src2);
- } else {
- if (dst != src1) {
- Movdqa(dst, src1);
- }
- CpuFeatureScope sse_scope(this, SSSE3);
- pmulhrsw(dst, src2);
- }
-}
-
-void TurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- // k = i16x8.splat(0x8000)
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psllw(kScratchDoubleReg, byte{15});
-
- Pmulhrsw(dst, src1, src2);
- Pcmpeqw(kScratchDoubleReg, dst);
- Pxor(dst, kScratchDoubleReg);
-}
-
-void TurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
- uint8_t laneidx) {
- if (laneidx == 0) {
- Movlps(dst, src);
- } else {
- DCHECK_EQ(1, laneidx);
- Movhps(dst, src);
- }
-}
-
-void TurboAssembler::I8x16Popcnt(XMMRegister dst, XMMRegister src,
- XMMRegister tmp) {
- DCHECK_NE(dst, tmp);
- DCHECK_NE(src, tmp);
- DCHECK_NE(kScratchDoubleReg, tmp);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqa(tmp, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f()));
- vpandn(kScratchDoubleReg, tmp, src);
- vpand(dst, tmp, src);
- vmovdqa(tmp, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_popcnt_mask()));
- vpsrlw(kScratchDoubleReg, kScratchDoubleReg, 4);
- vpshufb(dst, tmp, dst);
- vpshufb(kScratchDoubleReg, tmp, kScratchDoubleReg);
- vpaddb(dst, dst, kScratchDoubleReg);
- } else if (CpuFeatures::IsSupported(ATOM)) {
- // Pre-Goldmont low-power Intel microarchitectures have very slow
- // PSHUFB instruction, thus use PSHUFB-free divide-and-conquer
- // algorithm on these processors. ATOM CPU feature captures exactly
- // the right set of processors.
- movaps(tmp, src);
- psrlw(tmp, 1);
- if (dst != src) {
- movaps(dst, src);
- }
- andps(tmp, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x55()));
- psubb(dst, tmp);
- Operand splat_0x33 = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x33());
- movaps(tmp, dst);
- andps(dst, splat_0x33);
- psrlw(tmp, 2);
- andps(tmp, splat_0x33);
- paddb(dst, tmp);
- movaps(tmp, dst);
- psrlw(dst, 4);
- paddb(dst, tmp);
- andps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f()));
- } else {
- movaps(tmp, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f()));
- Operand mask = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_popcnt_mask());
- Move(kScratchDoubleReg, tmp);
- andps(tmp, src);
- andnps(kScratchDoubleReg, src);
- psrlw(kScratchDoubleReg, 4);
- movaps(dst, mask);
- pshufb(dst, tmp);
- movaps(tmp, mask);
- pshufb(tmp, kScratchDoubleReg);
- paddb(dst, tmp);
- }
-}
-
-void TurboAssembler::F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src) {
- // dst = [ src_low, 0x43300000, src_high, 0x4330000 ];
- // 0x43300000'00000000 is a special double where the significand bits
- // precisely represents all uint32 numbers.
+void TurboAssembler::Absps(XMMRegister dst, XMMRegister src) {
if (!CpuFeatures::IsSupported(AVX) && dst != src) {
movaps(dst, src);
src = dst;
}
- Unpcklps(dst, src,
- ExternalReferenceAsOperand(
- ExternalReference::
- address_of_wasm_f64x2_convert_low_i32x4_u_int_mask()));
- Subpd(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52()));
-}
-
-void TurboAssembler::I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- XMMRegister original_dst = dst;
- // Make sure we don't overwrite src.
- if (dst == src) {
- DCHECK_NE(src, kScratchDoubleReg);
- dst = kScratchDoubleReg;
- }
- // dst = 0 if src == NaN, else all ones.
- vcmpeqpd(dst, src, src);
- // dst = 0 if src == NaN, else INT32_MAX as double.
- vandpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_int32_max_as_double()));
- // dst = 0 if src == NaN, src is saturated to INT32_MAX as double.
- vminpd(dst, src, dst);
- // Values > INT32_MAX already saturated, values < INT32_MIN raises an
- // exception, which is masked and returns 0x80000000.
- vcvttpd2dq(dst, dst);
- if (original_dst != dst) {
- Move(original_dst, dst);
- }
- } else {
- if (dst != src) {
- Move(dst, src);
- }
- Move(kScratchDoubleReg, dst);
- cmpeqpd(kScratchDoubleReg, dst);
- andps(kScratchDoubleReg,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_int32_max_as_double()));
- minpd(dst, kScratchDoubleReg);
- cvttpd2dq(dst, dst);
- }
+ Andps(dst, src,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_float_abs_constant()));
}
-void TurboAssembler::I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vxorpd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- // Saturate to 0.
- vmaxpd(dst, src, kScratchDoubleReg);
- // Saturate to UINT32_MAX.
- vminpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_uint32_max_as_double()));
- // Truncate.
- vroundpd(dst, dst, kRoundToZero);
- // Add to special double where significant bits == uint32.
- vaddpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52()));
- // Extract low 32 bits of each double's significand, zero top lanes.
- // dst = [dst[0], dst[2], 0, 0]
- vshufps(dst, dst, kScratchDoubleReg, 0x88);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst != src) {
- Move(dst, src);
- }
- xorps(kScratchDoubleReg, kScratchDoubleReg);
- maxpd(dst, kScratchDoubleReg);
- minpd(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_uint32_max_as_double()));
- roundpd(dst, dst, kRoundToZero);
- addpd(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52()));
- shufps(dst, kScratchDoubleReg, 0x88);
- }
-}
-
-void TurboAssembler::I16x8ExtAddPairwiseI8x16S(XMMRegister dst,
- XMMRegister src) {
- // pmaddubsw treats the first operand as unsigned, so the external reference
- // to be passed to it as the first operand.
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01());
- if (dst == src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqa(kScratchDoubleReg, op);
- vpmaddubsw(dst, kScratchDoubleReg, src);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(kScratchDoubleReg, op);
- pmaddubsw(kScratchDoubleReg, src);
- movaps(dst, kScratchDoubleReg);
- }
- } else {
- Movdqa(dst, op);
- Pmaddubsw(dst, dst, src);
+void TurboAssembler::Negps(XMMRegister dst, XMMRegister src) {
+ if (!CpuFeatures::IsSupported(AVX) && dst != src) {
+ movaps(dst, src);
+ src = dst;
}
+ Xorps(dst, src,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_float_neg_constant()));
}
-void TurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
- XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // src = |a|b|c|d|e|f|g|h| (low)
- // scratch = |0|a|0|c|0|e|0|g|
- vpsrld(kScratchDoubleReg, src, 16);
- // dst = |0|b|0|d|0|f|0|h|
- vpblendw(dst, src, kScratchDoubleReg, 0xAA);
- // dst = |a+b|c+d|e+f|g+h|
- vpaddd(dst, kScratchDoubleReg, dst);
- } else if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- // There is a potentially better lowering if we get rip-relative constants,
- // see https://github.com/WebAssembly/simd/pull/380.
- movaps(kScratchDoubleReg, src);
- psrld(kScratchDoubleReg, 16);
- if (dst != src) {
- movaps(dst, src);
- }
- pblendw(dst, kScratchDoubleReg, 0xAA);
- paddd(dst, kScratchDoubleReg);
- } else {
- // src = |a|b|c|d|e|f|g|h|
- // kScratchDoubleReg = i32x4.splat(0x0000FFFF)
- pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- psrld(kScratchDoubleReg, byte{16});
- // kScratchDoubleReg =|0|b|0|d|0|f|0|h|
- andps(kScratchDoubleReg, src);
- // dst = |0|a|0|c|0|e|0|g|
- if (dst != src) {
- movaps(dst, src);
- }
- psrld(dst, byte{16});
- // dst = |a+b|c+d|e+f|g+h|
- paddd(dst, kScratchDoubleReg);
+void TurboAssembler::Abspd(XMMRegister dst, XMMRegister src) {
+ if (!CpuFeatures::IsSupported(AVX) && dst != src) {
+ movaps(dst, src);
+ src = dst;
}
+ Andps(dst, src,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_double_abs_constant()));
}
-void TurboAssembler::I8x16Swizzle(XMMRegister dst, XMMRegister src,
- XMMRegister mask, bool omit_add) {
- if (omit_add) {
- // We have determined that the indices are immediates, and they are either
- // within bounds, or the top bit is set, so we can omit the add.
- Pshufb(dst, src, mask);
- return;
- }
-
- // Out-of-range indices should return 0, add 112 so that any value > 15
- // saturates to 128 (top bit set), so pshufb will zero that lane.
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_swizzle_mask());
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpaddusb(kScratchDoubleReg, mask, op);
- vpshufb(dst, src, kScratchDoubleReg);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(kScratchDoubleReg, op);
- if (dst != src) {
- movaps(dst, src);
- }
- paddusb(kScratchDoubleReg, mask);
- pshufb(dst, kScratchDoubleReg);
+void TurboAssembler::Negpd(XMMRegister dst, XMMRegister src) {
+ if (!CpuFeatures::IsSupported(AVX) && dst != src) {
+ movaps(dst, src);
+ src = dst;
}
-}
-
-void TurboAssembler::Abspd(XMMRegister dst) {
- Andps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_double_abs_constant()));
-}
-
-void TurboAssembler::Negpd(XMMRegister dst) {
- Xorps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_double_neg_constant()));
+ Xorps(dst, src,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_double_neg_constant()));
}
void TurboAssembler::Lzcntl(Register dst, Register src) {
@@ -2794,8 +2503,7 @@ void MacroAssembler::AssertCodeT(Register object) {
Check(not_equal, AbortReason::kOperandIsNotACodeT);
Push(object);
LoadMap(object, object);
- CmpInstanceType(object, V8_EXTERNAL_CODE_SPACE_BOOL ? CODE_DATA_CONTAINER_TYPE
- : CODE_TYPE);
+ CmpInstanceType(object, CODET_TYPE);
Pop(object);
Check(equal, AbortReason::kOperandIsNotACodeT);
}
@@ -3067,8 +2775,12 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
leaq(kScratchRegister,
Operand(expected_parameter_count, times_system_pointer_size, 0));
AllocateStackSpace(kScratchRegister);
- // Extra words are the receiver and the return address (if a jump).
- int extra_words = type == InvokeType::kCall ? 1 : 2;
+ // Extra words are the receiver (if not already included in argc) and the
+ // return address (if a jump).
+ int extra_words =
+ type == InvokeType::kCall ? 0 : kReturnAddressStackSlotCount;
+ if (!kJSArgcIncludesReceiver) extra_words++;
+
leaq(num, Operand(rax, extra_words)); // Number of words to copy.
Move(current, 0);
// Fall-through to the loop body because there are non-zero words to copy.
@@ -3523,11 +3235,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
leaq(dst, Operand(&current, -pc));
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- // TODO(turbofan): Perhaps, we want to put an lfence here.
- Move(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index 02b9eb410e..ec35108aba 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -57,53 +57,23 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
-class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
+class V8_EXPORT_PRIVATE TurboAssembler
+ : public SharedTurboAssemblerBase<TurboAssembler> {
public:
- using SharedTurboAssembler::SharedTurboAssembler;
- AVX_OP(Subsd, subsd)
- AVX_OP(Divss, divss)
- AVX_OP(Divsd, divsd)
- AVX_OP(Pcmpgtw, pcmpgtw)
- AVX_OP(Pmaxsw, pmaxsw)
- AVX_OP(Pminsw, pminsw)
- AVX_OP(Addss, addss)
- AVX_OP(Addsd, addsd)
- AVX_OP(Mulsd, mulsd)
- AVX_OP(Cmpeqps, cmpeqps)
- AVX_OP(Cmpltps, cmpltps)
- AVX_OP(Cmpneqps, cmpneqps)
- AVX_OP(Cmpnltps, cmpnltps)
- AVX_OP(Cmpnleps, cmpnleps)
- AVX_OP(Cmpnltpd, cmpnltpd)
- AVX_OP(Cmpnlepd, cmpnlepd)
- AVX_OP(Cvttpd2dq, cvttpd2dq)
+ using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase;
AVX_OP(Ucomiss, ucomiss)
AVX_OP(Ucomisd, ucomisd)
- AVX_OP(Psubsw, psubsw)
- AVX_OP(Psubusw, psubusw)
- AVX_OP(Paddsw, paddsw)
- AVX_OP(Pcmpgtd, pcmpgtd)
AVX_OP(Pcmpeqb, pcmpeqb)
AVX_OP(Pcmpeqw, pcmpeqw)
AVX_OP(Pcmpeqd, pcmpeqd)
AVX_OP(Movlhps, movlhps)
- AVX_OP_SSSE3(Phaddd, phaddd)
- AVX_OP_SSSE3(Phaddw, phaddw)
- AVX_OP_SSSE3(Pshufb, pshufb)
AVX_OP_SSE4_1(Pcmpeqq, pcmpeqq)
AVX_OP_SSE4_1(Packusdw, packusdw)
- AVX_OP_SSE4_1(Pminsd, pminsd)
- AVX_OP_SSE4_1(Pminuw, pminuw)
- AVX_OP_SSE4_1(Pminud, pminud)
- AVX_OP_SSE4_1(Pmaxuw, pmaxuw)
- AVX_OP_SSE4_1(Pmaxud, pmaxud)
- AVX_OP_SSE4_1(Pmulld, pmulld)
AVX_OP_SSE4_1(Insertps, insertps)
AVX_OP_SSE4_1(Pinsrq, pinsrq)
AVX_OP_SSE4_1(Pextrq, pextrq)
AVX_OP_SSE4_1(Roundss, roundss)
AVX_OP_SSE4_1(Roundsd, roundsd)
- AVX_OP_SSE4_2(Pcmpgtq, pcmpgtq)
#undef AVX_OP
@@ -113,6 +83,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Movq(XMMRegister dst, Register src);
void Movq(Register dst, XMMRegister src);
+ void F64x2Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F64x2Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F32x4Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F32x4Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+
void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); }
@@ -432,17 +411,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void CallCodeTObject(Register code);
void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);
- void RetpolineCall(Register reg);
- void RetpolineCall(Address destination, RelocInfo::Mode rmode);
-
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(const ExternalReference& reference);
void Jump(Operand op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
Condition cc = always);
- void RetpolineJump(Register reg);
-
void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
@@ -450,58 +424,34 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Trap();
void DebugBreak();
- // Will move src1 to dst if dst != src1.
- void Pmaddwd(XMMRegister dst, XMMRegister src1, Operand src2);
- void Pmaddwd(XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void Pmaddubsw(XMMRegister dst, XMMRegister src1, Operand src2);
- void Pmaddubsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
-
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
- void Pinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
- void Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
- void Pinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
- void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
- void Pinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
- void Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
- void Pinsrd(XMMRegister dst, Register src2, uint8_t imm8);
- void Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8);
- void Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
- void Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
-
- void Pblendvb(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister mask);
- void Blendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister mask);
- void Blendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister mask);
-
- // Supports both SSE and AVX. Move src1 to dst if they are not equal on SSE.
- void Pshufb(XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void Pmulhrsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
-
- // These Wasm SIMD ops do not have direct lowerings on x64. These
- // helpers are optimized to produce the fastest and smallest codegen.
- // Defined here to allow usage on both TurboFan and Liftoff.
- void I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, XMMRegister src2);
-
- void S128Store64Lane(Operand dst, XMMRegister src, uint8_t laneidx);
-
- void I8x16Popcnt(XMMRegister dst, XMMRegister src, XMMRegister tmp);
-
- void F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src);
- void I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src);
- void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src);
-
- void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src);
- void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src);
-
- void I8x16Swizzle(XMMRegister dst, XMMRegister src, XMMRegister mask,
- bool omit_add = false);
-
- void Abspd(XMMRegister dst);
- void Negpd(XMMRegister dst);
+ void Pinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrd(XMMRegister dst, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+
+ void Absps(XMMRegister dst, XMMRegister src);
+ void Negps(XMMRegister dst, XMMRegister src);
+ void Abspd(XMMRegister dst, XMMRegister src);
+ void Negpd(XMMRegister dst, XMMRegister src);
void CompareRoot(Register with, RootIndex index);
void CompareRoot(Operand with, RootIndex index);
@@ -595,9 +545,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
StubCallMode mode = StubCallMode::kCallBuiltinPointer);
#ifdef V8_IS_TSAN
- void CallTSANRelaxedStoreStub(Register address, Register value,
- SaveFPRegsMode fp_mode, int size,
- StubCallMode mode);
+ void CallTSANStoreStub(Register address, Register value,
+ SaveFPRegsMode fp_mode, int size, StubCallMode mode,
+ std::memory_order order);
void CallTSANRelaxedLoadStub(Register address, SaveFPRegsMode fp_mode,
int size, StubCallMode mode);
#endif // V8_IS_TSAN
@@ -632,8 +582,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -676,6 +624,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void StoreTaggedField(Operand dst_field_operand, Immediate immediate);
void StoreTaggedField(Operand dst_field_operand, Register value);
void StoreTaggedSignedField(Operand dst_field_operand, Smi value);
+ void AtomicStoreTaggedField(Operand dst_field_operand, Register value);
// The following macros work even when pointer compression is not enabled.
void DecompressTaggedSigned(Register destination, Operand field_operand);
@@ -851,10 +800,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void Pop(Operand dst);
void PopQuad(Operand dst);
- // ---------------------------------------------------------------------------
- // SIMD macros.
- void Absps(XMMRegister dst);
- void Negps(XMMRegister dst);
// Generates a trampoline to jump to the off-heap instruction stream.
void JumpToInstructionStream(Address entry);
diff --git a/deps/v8/src/codegen/x64/register-x64.h b/deps/v8/src/codegen/x64/register-x64.h
index 61e7ccf396..f36763f2e4 100644
--- a/deps/v8/src/codegen/x64/register-x64.h
+++ b/deps/v8/src/codegen/x64/register-x64.h
@@ -155,6 +155,24 @@ constexpr Register arg_reg_4 = rcx;
V(xmm13) \
V(xmm14)
+#define YMM_REGISTERS(V) \
+ V(ymm0) \
+ V(ymm1) \
+ V(ymm2) \
+ V(ymm3) \
+ V(ymm4) \
+ V(ymm5) \
+ V(ymm6) \
+ V(ymm7) \
+ V(ymm8) \
+ V(ymm9) \
+ V(ymm10) \
+ V(ymm11) \
+ V(ymm12) \
+ V(ymm13) \
+ V(ymm14) \
+ V(ymm15)
+
// Returns the number of padding slots needed for stack pointer alignment.
constexpr int ArgumentPaddingSlots(int argument_count) {
// No argument padding required.
@@ -171,6 +189,17 @@ enum DoubleRegisterCode {
kDoubleAfterLast
};
+enum YMMRegisterCode {
+#define REGISTER_CODE(R) kYMMCode_##R,
+ YMM_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kYMMAfterLast
+};
+static_assert(static_cast<int>(kDoubleAfterLast) ==
+ static_cast<int>(kYMMAfterLast),
+ "The number of XMM register codes must match the number of YMM "
+ "register codes");
+
class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
public:
// Return the high bit of the register code as a 0 or 1. Used often
@@ -180,7 +209,7 @@ class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
// in modR/M, SIB, and opcode bytes.
int low_bits() const { return code() & 0x7; }
- private:
+ protected:
friend class RegisterBase<XMMRegister, kDoubleAfterLast>;
explicit constexpr XMMRegister(int code) : RegisterBase(code) {}
};
@@ -189,6 +218,22 @@ ASSERT_TRIVIALLY_COPYABLE(XMMRegister);
static_assert(sizeof(XMMRegister) == sizeof(int),
"XMMRegister can efficiently be passed by value");
+class YMMRegister : public XMMRegister {
+ public:
+ static constexpr YMMRegister from_code(int code) {
+ DCHECK(base::IsInRange(code, 0, XMMRegister::kNumRegisters - 1));
+ return YMMRegister(code);
+ }
+
+ private:
+ friend class XMMRegister;
+ explicit constexpr YMMRegister(int code) : XMMRegister(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(YMMRegister);
+static_assert(sizeof(YMMRegister) == sizeof(int),
+ "YMMRegister can efficiently be passed by value");
+
using FloatRegister = XMMRegister;
using DoubleRegister = XMMRegister;
@@ -201,9 +246,15 @@ DOUBLE_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
+#define DECLARE_REGISTER(R) \
+ constexpr YMMRegister R = YMMRegister::from_code(kYMMCode_##R);
+YMM_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+
// Define {RegisterName} methods for the register types.
DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
DEFINE_REGISTER_NAMES(XMMRegister, DOUBLE_REGISTERS)
+DEFINE_REGISTER_NAMES(YMMRegister, YMM_REGISTERS)
// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = rax;
@@ -212,7 +263,6 @@ constexpr Register kReturnRegister2 = r8;
constexpr Register kJSFunctionRegister = rdi;
constexpr Register kContextRegister = rsi;
constexpr Register kAllocateSizeRegister = rdx;
-constexpr Register kSpeculationPoisonRegister = r11;
constexpr Register kInterpreterAccumulatorRegister = rax;
constexpr Register kInterpreterBytecodeOffsetRegister = r9;
constexpr Register kInterpreterBytecodeArrayRegister = r12;
diff --git a/deps/v8/src/codegen/x64/sse-instr.h b/deps/v8/src/codegen/x64/sse-instr.h
index 452cc0f690..d1223b69a1 100644
--- a/deps/v8/src/codegen/x64/sse-instr.h
+++ b/deps/v8/src/codegen/x64/sse-instr.h
@@ -32,6 +32,7 @@
V(sqrtss, F3, 0F, 51) \
V(addss, F3, 0F, 58) \
V(mulss, F3, 0F, 59) \
+ V(cvtss2sd, F3, 0F, 5A) \
V(subss, F3, 0F, 5C) \
V(minss, F3, 0F, 5D) \
V(divss, F3, 0F, 5E) \
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index 6aee59eb83..6df1da88ae 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -62,6 +62,9 @@ constexpr int GB = MB * 1024;
#if (V8_TARGET_ARCH_RISCV64 && !V8_HOST_ARCH_RISCV64)
#define USE_SIMULATOR 1
#endif
+#if (V8_TARGET_ARCH_LOONG64 && !V8_HOST_ARCH_LOONG64)
+#define USE_SIMULATOR 1
+#endif
#endif
// Determine whether the architecture uses an embedded constant pool
@@ -587,9 +590,14 @@ constexpr intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
constexpr intptr_t kDoubleAlignment = 8;
constexpr intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
-// Desired alignment for generated code is 32 bytes (to improve cache line
-// utilization).
+// Desired alignment for generated code is 64 bytes on x64 (to allow 64-byte
+// loop header alignment) and 32 bytes (to improve cache line utilization) on
+// other architectures.
+#if V8_TARGET_ARCH_X64
+constexpr int kCodeAlignmentBits = 6;
+#else
constexpr int kCodeAlignmentBits = 5;
+#endif
constexpr intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
constexpr intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
@@ -1701,20 +1709,6 @@ enum IsolateAddressId {
kIsolateAddressCount
};
-enum class PoisoningMitigationLevel {
- kPoisonAll,
- kDontPoison,
- kPoisonCriticalOnly
-};
-
-enum class LoadSensitivity {
- kCritical, // Critical loads are poisoned whenever we can run untrusted
- // code (i.e., when --untrusted-code-mitigations is on).
- kUnsafe, // Unsafe loads are poisoned when full poisoning is on
- // (--branch-load-poisoning).
- kSafe // Safe loads are never poisoned.
-};
-
// The reason for a WebAssembly trap.
#define FOREACH_WASM_TRAPREASON(V) \
V(TrapUnreachable) \
@@ -1785,7 +1779,20 @@ constexpr int kSwissNameDictionaryInitialCapacity = 4;
constexpr int kSmallOrderedHashSetMinCapacity = 4;
constexpr int kSmallOrderedHashMapMinCapacity = 4;
-static const uint16_t kDontAdaptArgumentsSentinel = static_cast<uint16_t>(-1);
+#ifdef V8_INCLUDE_RECEIVER_IN_ARGC
+constexpr bool kJSArgcIncludesReceiver = true;
+constexpr int kJSArgcReceiverSlots = 1;
+constexpr uint16_t kDontAdaptArgumentsSentinel = 0;
+#else
+constexpr bool kJSArgcIncludesReceiver = false;
+constexpr int kJSArgcReceiverSlots = 0;
+constexpr uint16_t kDontAdaptArgumentsSentinel = static_cast<uint16_t>(-1);
+#endif
+
+// Helper to get the parameter count for functions with JS linkage.
+inline constexpr int JSParameterCount(int param_count_without_receiver) {
+ return param_count_without_receiver + kJSArgcReceiverSlots;
+}
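+// For example, JSParameterCount(2) is 3 if the receiver is included in the
+// argument count (V8_INCLUDE_RECEIVER_IN_ARGC) and 2 otherwise.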
// Opaque data type for identifying stack frames. Used extensively
// by the debugger.
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index 89ef319db1..a925300c5c 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -380,6 +380,7 @@ namespace internal {
T(TypedArrayTooLargeToSort, \
"Custom comparefn not supported for huge TypedArrays") \
T(ValueOutOfRange, "Value % out of range for % options property %") \
+ T(CollectionGrowFailed, "% maximum size exceeded") \
/* SyntaxError */ \
T(AmbiguousExport, \
"The requested module '%' contains conflicting star exports for name '%'") \
@@ -439,6 +440,10 @@ namespace internal {
T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
T(InvalidOrUnexpectedToken, "Invalid or unexpected token") \
T(InvalidPrivateBrand, "Object must be an instance of class %") \
+ T(InvalidPrivateBrandReinitialization, \
+ "Cannot initialize private methods of class % twice on the same object") \
+ T(InvalidPrivateFieldReitialization, \
+ "Cannot initialize % twice on the same object") \
T(InvalidPrivateFieldResolution, \
"Private field '%' must be declared in an enclosing class") \
T(InvalidPrivateMemberRead, \
diff --git a/deps/v8/src/compiler-dispatcher/OWNERS b/deps/v8/src/compiler-dispatcher/OWNERS
index f08a549385..84cd0368eb 100644
--- a/deps/v8/src/compiler-dispatcher/OWNERS
+++ b/deps/v8/src/compiler-dispatcher/OWNERS
@@ -1,4 +1,3 @@
jkummerow@chromium.org
leszeks@chromium.org
-rmcilroy@chromium.org
victorgomes@chromium.org
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index f8a7fa8814..45f3684fb6 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -173,7 +173,6 @@ void OptimizingCompileDispatcher::AwaitCompileTasks() {
void OptimizingCompileDispatcher::FlushQueues(
BlockingBehavior blocking_behavior, bool restore_function_code) {
- if (FLAG_block_concurrent_recompilation) Unblock();
FlushInputQueue();
if (blocking_behavior == BlockingBehavior::kBlock) {
base::MutexGuard lock_guard(&ref_count_mutex_);
@@ -231,7 +230,7 @@ bool OptimizingCompileDispatcher::HasJobs() {
// Note: This relies on {output_queue_} being mutated by a background thread
// only when {ref_count_} is not zero. Also, {ref_count_} is never incremented
// by a background thread.
- return ref_count_ != 0 || !output_queue_.empty() || blocked_jobs_ != 0;
+ return ref_count_ != 0 || !output_queue_.empty();
}
void OptimizingCompileDispatcher::QueueForOptimization(
@@ -244,20 +243,8 @@ void OptimizingCompileDispatcher::QueueForOptimization(
input_queue_[InputQueueIndex(input_queue_length_)] = job;
input_queue_length_++;
}
- if (FLAG_block_concurrent_recompilation) {
- blocked_jobs_++;
- } else {
- V8::GetCurrentPlatform()->CallOnWorkerThread(
- std::make_unique<CompileTask>(isolate_, this));
- }
-}
-
-void OptimizingCompileDispatcher::Unblock() {
- while (blocked_jobs_ > 0) {
- V8::GetCurrentPlatform()->CallOnWorkerThread(
- std::make_unique<CompileTask>(isolate_, this));
- blocked_jobs_--;
- }
+ V8::GetCurrentPlatform()->CallOnWorkerThread(
+ std::make_unique<CompileTask>(isolate_, this));
}
} // namespace internal
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 56592ed9b4..ccfb4f2a4a 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -30,7 +30,6 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
input_queue_length_(0),
input_queue_shift_(0),
- blocked_jobs_(0),
ref_count_(0),
recompilation_delay_(FLAG_concurrent_recompilation_delay) {
input_queue_ = NewArray<OptimizedCompilationJob*>(input_queue_capacity_);
@@ -42,7 +41,6 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
void Flush(BlockingBehavior blocking_behavior);
// Takes ownership of |job|.
void QueueForOptimization(OptimizedCompilationJob* job);
- void Unblock();
void AwaitCompileTasks();
void InstallOptimizedFunctions();
@@ -99,8 +97,6 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
// different threads.
base::Mutex output_queue_mutex_;
- int blocked_jobs_;
-
std::atomic<int> ref_count_;
base::Mutex ref_count_mutex_;
base::ConditionVariable ref_count_zero_;
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 1626bc5487..a415cbfa66 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -4,7 +4,6 @@ mvstanton@chromium.org
neis@chromium.org
nicohartmann@chromium.org
sigurds@chromium.org
-solanes@chromium.org
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 675371df57..fda0727dd1 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -82,25 +82,25 @@ FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer() {
- FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::TaggedPointer(),
- kPointerWriteBarrier, LoadSensitivity::kCritical};
+ FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
- FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier, LoadSensitivity::kCritical};
+ FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -410,26 +410,22 @@ FieldAccess AccessBuilder::ForJSTypedArrayBasePointer() {
FieldAccess access = {kTaggedBase, JSTypedArray::kBasePointerOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSTypedArrayExternalPointer() {
- FieldAccess access = {kTaggedBase,
- JSTypedArray::kExternalPointerOffset,
- MaybeHandle<Name>(),
- MaybeHandle<Map>(),
- V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
- : Type::ExternalPointer(),
- MachineType::Pointer(),
- kNoWriteBarrier,
- LoadSensitivity::kCritical,
- ConstFieldInfo::None(),
- false,
-#ifdef V8_HEAP_SANDBOX
- kTypedArrayExternalPointerTag
-#endif
+ FieldAccess access = {
+ kTaggedBase,
+ JSTypedArray::kExternalPointerOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::ExternalPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier,
+ ConstFieldInfo::None(),
+ false,
};
return access;
}
@@ -441,16 +437,11 @@ FieldAccess AccessBuilder::ForJSDataViewDataPointer() {
JSDataView::kDataPointerOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
- : Type::ExternalPointer(),
+ Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
- LoadSensitivity::kUnsafe,
ConstFieldInfo::None(),
false,
-#ifdef V8_HEAP_SANDBOX
- kDataViewDataPointerTag,
-#endif
};
return access;
}
@@ -756,7 +747,6 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
: Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
- LoadSensitivity::kUnsafe,
ConstFieldInfo::None(),
false,
#ifdef V8_HEAP_SANDBOX
@@ -902,10 +892,10 @@ FieldAccess AccessBuilder::ForWeakFixedArraySlot(int index) {
}
// static
FieldAccess AccessBuilder::ForCellValue() {
- FieldAccess access = {kTaggedBase, Cell::kValueOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ FieldAccess access = {kTaggedBase, Cell::kValueOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -966,11 +956,9 @@ ElementAccess AccessBuilder::ForSloppyArgumentsElementsMappedEntry() {
}
// statics
-ElementAccess AccessBuilder::ForFixedArrayElement(
- ElementsKind kind, LoadSensitivity load_sensitivity) {
- ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize,
- Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier, load_sensitivity};
+ElementAccess AccessBuilder::ForFixedArrayElement(ElementsKind kind) {
+ ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
+ MachineType::AnyTagged(), kFullWriteBarrier};
switch (kind) {
case PACKED_SMI_ELEMENTS:
access.type = Type::SignedSmall();
@@ -1038,59 +1026,50 @@ FieldAccess AccessBuilder::ForEnumCacheIndices() {
}
// static
-ElementAccess AccessBuilder::ForTypedArrayElement(
- ExternalArrayType type, bool is_external,
- LoadSensitivity load_sensitivity) {
+ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
+ bool is_external) {
BaseTaggedness taggedness = is_external ? kUntaggedBase : kTaggedBase;
int header_size = is_external ? 0 : ByteArray::kHeaderSize;
switch (type) {
case kExternalInt8Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Signed32(), MachineType::Int8(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int8(), kNoWriteBarrier};
return access;
}
case kExternalUint8Array:
case kExternalUint8ClampedArray: {
- ElementAccess access = {taggedness, header_size,
- Type::Unsigned32(), MachineType::Uint8(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint8(), kNoWriteBarrier};
return access;
}
case kExternalInt16Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Signed32(), MachineType::Int16(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int16(), kNoWriteBarrier};
return access;
}
case kExternalUint16Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Unsigned32(), MachineType::Uint16(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint16(), kNoWriteBarrier};
return access;
}
case kExternalInt32Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Signed32(), MachineType::Int32(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int32(), kNoWriteBarrier};
return access;
}
case kExternalUint32Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Unsigned32(), MachineType::Uint32(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint32(), kNoWriteBarrier};
return access;
}
case kExternalFloat32Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Number(), MachineType::Float32(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Number(),
+ MachineType::Float32(), kNoWriteBarrier};
return access;
}
case kExternalFloat64Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Number(), MachineType::Float64(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Number(),
+ MachineType::Float64(), kNoWriteBarrier};
return access;
}
case kExternalBigInt64Array:
@@ -1239,15 +1218,6 @@ FieldAccess AccessBuilder::ForDictionaryObjectHashIndex() {
}
// static
-FieldAccess AccessBuilder::ForFeedbackCellValue() {
- FieldAccess access = {kTaggedBase, FeedbackCell::kValueOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::TaggedPointer(),
- kFullWriteBarrier};
- return access;
-}
-
-// static
FieldAccess AccessBuilder::ForFeedbackCellInterruptBudget() {
FieldAccess access = {kTaggedBase,
FeedbackCell::kInterruptBudgetOffset,
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index fa68628cf8..99ffde19c4 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -299,9 +299,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();
- static ElementAccess ForFixedArrayElement(
- ElementsKind kind,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe);
+ static ElementAccess ForFixedArrayElement(ElementsKind kind);
// Provides access to SloppyArgumentsElements elements.
static ElementAccess ForSloppyArgumentsElementsMappedEntry();
@@ -319,9 +317,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForEnumCacheIndices();
// Provides access to Fixed{type}TypedArray and External{type}Array elements.
- static ElementAccess ForTypedArrayElement(
- ExternalArrayType type, bool is_external,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe);
+ static ElementAccess ForTypedArrayElement(ExternalArrayType type,
+ bool is_external);
// Provides access to HashTable fields.
static FieldAccess ForHashTableBaseNumberOfElements();
@@ -342,7 +339,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForDictionaryObjectHashIndex();
// Provides access to FeedbackCell fields.
- static FieldAccess ForFeedbackCellValue();
static FieldAccess ForFeedbackCellInterruptBudget();
// Provides access to a FeedbackVector fields.
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 21f453f4d8..e68ced7460 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -8,7 +8,6 @@
#include "src/builtins/accessors.h"
#include "src/compiler/compilation-dependencies.h"
-#include "src/compiler/compilation-dependency.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/ic/call-optimization.h"
@@ -57,7 +56,8 @@ bool HasFieldRepresentationDependenciesOnMap(
ZoneVector<CompilationDependency const*>& dependencies,
Handle<Map> const& field_owner_map) {
for (auto dep : dependencies) {
- if (dep->IsFieldRepresentationDependencyOnMap(field_owner_map)) {
+ if (CompilationDependencies::IsFieldRepresentationDependencyOnMap(
+ dep, field_owner_map)) {
return true;
}
}
@@ -109,6 +109,7 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
FieldIndex field_index, Representation field_representation,
Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
base::Optional<JSObjectRef> holder, base::Optional<MapRef> transition_map) {
+ DCHECK(!field_representation.IsNone());
DCHECK_IMPLIES(
field_representation.IsDouble(),
HasFieldRepresentationDependenciesOnMap(
@@ -129,6 +130,7 @@ PropertyAccessInfo PropertyAccessInfo::FastDataConstant(
FieldIndex field_index, Representation field_representation,
Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
base::Optional<JSObjectRef> holder, base::Optional<MapRef> transition_map) {
+ DCHECK(!field_representation.IsNone());
return PropertyAccessInfo(kFastDataConstant, holder, transition_map,
field_index, field_representation, field_type,
field_owner_map, field_map, {{receiver_map}, zone},
@@ -384,7 +386,7 @@ AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
base::Optional<ElementAccessInfo> AccessInfoFactory::ComputeElementAccessInfo(
MapRef map, AccessMode access_mode) const {
- if (!CanInlineElementAccess(map)) return {};
+ if (!map.CanInlineElementAccess()) return {};
return ElementAccessInfo({{map}, zone()}, map.elements_kind(), zone());
}
@@ -542,7 +544,7 @@ PropertyAccessInfo AccessorAccessInfoHelper(
Handle<Cell> cell = broker->CanonicalPersistentHandle(
Cell::cast(module_namespace->module().exports().Lookup(
isolate, name.object(), Smi::ToInt(name.object()->GetHash()))));
- if (cell->value().IsTheHole(isolate)) {
+ if (cell->value(kRelaxedLoad).IsTheHole(isolate)) {
// This module has not been fully initialized yet.
return PropertyAccessInfo::Invalid(zone);
}
@@ -1050,7 +1052,7 @@ base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
base::Optional<MapRef> map = TryMakeRef(broker(), map_handle);
if (!map.has_value()) return {};
if (map->instance_type() != instance_type ||
- !CanInlineElementAccess(*map)) {
+ !map->CanInlineElementAccess()) {
return {};
}
if (!GeneralizeElementsKind(elements_kind, map->elements_kind())
@@ -1132,6 +1134,8 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
int const index = details.field_index();
Representation details_representation = details.representation();
+ if (details_representation.IsNone()) return Invalid();
+
FieldIndex field_index = FieldIndex::ForPropertyIndex(
*transition_map.object(), index, details_representation);
Type field_type = Type::NonInternal();
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 29c7897ec9..7bc90fd822 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -36,9 +36,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
SBit OutputSBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
- case kFlags_branch_and_poison:
case kFlags_deoptimize:
- case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
case kFlags_select:
@@ -322,35 +320,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode,
- ArmOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->and_(value, value, Operand(kSpeculationPoisonRegister));
- }
-}
-
-void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
- InstructionCode opcode,
- ArmOperandConverter const& i,
- Register address) {
- DCHECK_EQ(kMemoryAccessPoisoned, AccessModeField::decode(opcode));
- switch (AddressingModeField::decode(opcode)) {
- case kMode_Offset_RI:
- codegen->tasm()->mov(address, i.InputImmediate(1));
- codegen->tasm()->add(address, address, i.InputRegister(0));
- break;
- case kMode_Offset_RR:
- codegen->tasm()->add(address, i.InputRegister(0), i.InputRegister(1));
- break;
- default:
- UNREACHABLE();
- }
- codegen->tasm()->and_(address, address, Operand(kSpeculationPoisonRegister));
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -360,12 +329,11 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
__ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
- do { \
- __ dmb(ISH); \
- __ asm_instr(i.InputRegister(2), \
- MemOperand(i.InputRegister(0), i.InputRegister(1))); \
- __ dmb(ISH); \
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, order) \
+ do { \
+ __ dmb(ISH); \
+ __ asm_instr(i.InputRegister(0), i.InputOffset(1)); \
+ if (order == AtomicMemoryOrder::kSeqCst) __ dmb(ISH); \
} while (0)
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
@@ -691,25 +659,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.Acquire();
-
- // Set a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ ComputeCodeStartAddress(scratch);
- __ cmp(kJavaScriptCallCodeStartRegister, scratch);
- __ mov(kSpeculationPoisonRegister, Operand(-1), SBit::LeaveCC, eq);
- __ mov(kSpeculationPoisonRegister, Operand(0), SBit::LeaveCC, ne);
- __ csdb();
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ and_(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -977,15 +926,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(0), DetermineStubCallMode());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArchStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
+ RecordWriteMode mode;
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ mode = static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ } else {
+ mode = AtomicStoreRecordWriteModeField::decode(instr->opcode());
+ }
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
AddressingMode addressing_mode =
AddressingModeField::decode(instr->opcode());
Operand offset(0);
+
+ if (arch_opcode == kArchAtomicStoreWithWriteBarrier) {
+ __ dmb(ISH);
+ }
if (addressing_mode == kMode_Offset_RI) {
int32_t immediate = i.InputInt32(1);
offset = Operand(immediate);
@@ -996,6 +954,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
offset = Operand(reg);
__ str(value, MemOperand(object, reg));
}
+ if (arch_opcode == kArchAtomicStoreWithWriteBarrier &&
+ AtomicMemoryOrderField::decode(instr->opcode()) ==
+ AtomicMemoryOrder::kSeqCst) {
+ __ dmb(ISH);
+ }
+
auto ool = zone()->New<OutOfLineRecordWrite>(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
@@ -1619,12 +1583,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmLdrb:
__ ldrb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmLdrsb:
__ ldrsb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStrb:
__ strb(i.InputRegister(0), i.InputOffset(1));
@@ -1632,11 +1594,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdrh:
__ ldrh(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmLdrsh:
__ ldrsh(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStrh:
__ strh(i.InputRegister(0), i.InputOffset(1));
@@ -1644,22 +1604,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdr:
__ ldr(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStr:
__ str(i.InputRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVldrF32: {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- UseScratchRegisterScope temps(tasm());
- Register address = temps.Acquire();
- ComputePoisonedAddressForLoad(this, opcode, i, address);
- __ vldr(i.OutputFloatRegister(), address, 0);
- } else {
- __ vldr(i.OutputFloatRegister(), i.InputOffset());
- }
+ __ vldr(i.OutputFloatRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -1688,15 +1639,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVldrF64: {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- UseScratchRegisterScope temps(tasm());
- Register address = temps.Acquire();
- ComputePoisonedAddressForLoad(this, opcode, i, address);
- __ vldr(i.OutputDoubleRegister(), address, 0);
- } else {
- __ vldr(i.OutputDoubleRegister(), i.InputOffset());
- }
+ __ vldr(i.OutputDoubleRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -1832,10 +1775,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ isb(SY);
break;
}
- case kArchWordPoisonOnSpeculation:
- __ and_(i.OutputRegister(0), i.InputRegister(0),
- Operand(kSpeculationPoisonRegister));
- break;
case kArmVmullLow: {
auto dt = static_cast<NeonDataType>(MiscField::decode(instr->opcode()));
__ vmull(dt, i.OutputSimd128Register(), i.InputSimd128Register(0).low(),
@@ -3373,94 +3312,97 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ StoreLane(sz, src_list, i.InputUint8(1), i.NeonInputOperand(2));
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrb);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrh);
break;
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
break;
- case kWord32AtomicStoreWord8:
- ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(strb,
+ AtomicMemoryOrderField::decode(opcode));
break;
- case kWord32AtomicStoreWord16:
- ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(strh,
+ AtomicMemoryOrderField::decode(opcode));
break;
- case kWord32AtomicStoreWord32:
- ASSEMBLE_ATOMIC_STORE_INTEGER(str);
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(str,
+ AtomicMemoryOrderField::decode(opcode));
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
break;
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
break;
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex,
i.InputRegister(2));
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
__ sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
__ sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
break; \
- case kWord32Atomic##op##Word32: \
+ case kAtomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(ldrex, strex, inst); \
break;
ATOMIC_BINOP_CASE(Add, add)
@@ -3597,20 +3539,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ eor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- Operand(kSpeculationPoisonRegister), SBit::LeaveCC,
- FlagsConditionToCondition(condition));
- __ csdb();
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3805,7 +3733,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -3955,12 +3882,20 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// DropArguments().
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
if (parameter_slots > 1) {
- const int parameter_slots_without_receiver = parameter_slots - 1;
- __ cmp(argc_reg, Operand(parameter_slots_without_receiver));
- __ mov(argc_reg, Operand(parameter_slots_without_receiver), LeaveCC, lt);
+ if (kJSArgcIncludesReceiver) {
+ __ cmp(argc_reg, Operand(parameter_slots));
+ __ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt);
+ } else {
+ const int parameter_slots_without_receiver = parameter_slots - 1;
+ __ cmp(argc_reg, Operand(parameter_slots_without_receiver));
+ __ mov(argc_reg, Operand(parameter_slots_without_receiver), LeaveCC,
+ lt);
+ }
}
__ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 2698d45ae7..3de9b2aab6 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -430,17 +430,18 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
void EmitStore(InstructionSelector* selector, InstructionCode opcode,
size_t input_count, InstructionOperand* inputs, Node* index) {
ArmOperandGenerator g(selector);
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
if (g.CanBeImmediate(index, opcode)) {
inputs[input_count++] = g.UseImmediate(index);
opcode |= AddressingModeField::encode(kMode_Offset_RI);
- } else if ((opcode == kArmStr) &&
+ } else if ((arch_opcode == kArmStr || arch_opcode == kAtomicStoreWord32) &&
TryMatchLSLImmediate(selector, &opcode, index, &inputs[2],
&inputs[3])) {
input_count = 4;
} else {
inputs[input_count++] = g.UseRegister(index);
- if (opcode == kArmVst1S128) {
+ if (arch_opcode == kArmVst1S128) {
// Inputs are value, base, index, only care about base and index.
EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[1]);
} else {
@@ -630,29 +631,69 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
InstructionOperand output = g.DefineAsRegister(node);
EmitLoad(this, opcode, &output, base, index);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
-void InstructionSelector::VisitStore(Node* node) {
- ArmOperandGenerator g(this);
+namespace {
+
+ArchOpcode GetStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ return kArmVstrF32;
+ case MachineRepresentation::kFloat64:
+ return kArmVstrF64;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ return kArmStrb;
+ case MachineRepresentation::kWord16:
+ return kArmStrh;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ return kArmStr;
+ case MachineRepresentation::kSimd128:
+ return kArmVst1S128;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+}
+
+ArchOpcode GetAtomicStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ return kAtomicStoreWord8;
+ case MachineRepresentation::kWord16:
+ return kAtomicStoreWord16;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ return kAtomicStoreWord32;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ ArmOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
@@ -678,58 +719,44 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
- InstructionCode code = kArchStoreWithWriteBarrier;
+ InstructionCode code;
+ if (!atomic_order) {
+ code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ } else {
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= AtomicMemoryOrderField::encode(*atomic_order);
+ code |= AtomicStoreRecordWriteModeField::encode(record_write_mode);
+ }
code |= AddressingModeField::encode(addressing_mode);
- code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, input_count, inputs);
+ selector->Emit(code, 0, nullptr, input_count, inputs);
} else {
InstructionCode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kFloat32:
- opcode = kArmVstrF32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kArmVstrF64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = kArmStrb;
- break;
- case MachineRepresentation::kWord16:
- opcode = kArmStrh;
- break;
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord32:
- opcode = kArmStr;
- break;
- case MachineRepresentation::kSimd128:
- opcode = kArmVst1S128;
- break;
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kMapWord: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
+ if (!atomic_order) {
+ opcode = GetStoreOpcode(rep);
+ } else {
+ // Release stores emit DMB ISH; STR while sequentially consistent stores
+ // emit DMB ISH; STR; DMB ISH.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ opcode = GetAtomicStoreOpcode(rep);
+ opcode |= AtomicMemoryOrderField::encode(*atomic_order);
}
ExternalReferenceMatcher m(base);
if (m.HasResolvedValue() &&
- CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
+ selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
Int32Matcher int_matcher(index);
if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
int_matcher.ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
- isolate(), m.ResolvedValue());
+ selector->isolate(), m.ResolvedValue());
int input_count = 2;
InstructionOperand inputs[2];
inputs[0] = g.UseRegister(value);
inputs[1] = g.UseImmediate(static_cast<int32_t>(delta));
opcode |= AddressingModeField::encode(kMode_Root);
- Emit(opcode, 0, nullptr, input_count, inputs);
+ selector->Emit(opcode, 0, nullptr, input_count, inputs);
return;
}
}
@@ -738,10 +765,17 @@ void InstructionSelector::VisitStore(Node* node) {
size_t input_count = 0;
inputs[input_count++] = g.UseRegister(value);
inputs[input_count++] = g.UseRegister(base);
- EmitStore(this, opcode, input_count, inputs, index);
+ EmitStore(selector, opcode, input_count, inputs, index);
}
}
+} // namespace
+
+void InstructionSelector::VisitStore(Node* node) {
+ VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -2236,22 +2270,27 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit LDR; DMB ISH.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
+ opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -2261,34 +2300,9 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArmOperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
- break;
- default:
- UNREACHABLE();
- }
-
- AddressingMode addressing_mode = kMode_Offset_RR;
- InstructionOperand inputs[4];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(value);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, nullptr, input_count, inputs);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
@@ -2299,15 +2313,15 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2334,15 +2348,15 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2399,12 +2413,11 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index c121383426..fcab0a739b 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -235,7 +235,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): RPO immediates on arm64.
- break;
}
UNREACHABLE();
}
@@ -460,47 +459,6 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
}
#endif // V8_ENABLE_WEBASSEMBLY
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- Arm64OperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- Register poison = value.Is64Bits() ? kSpeculationPoisonRegister
- : kSpeculationPoisonRegister.W();
- codegen->tasm()->And(value, value, Operand(poison));
- }
-}
-
-void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
- Arm64OperandConverter* i, VRegister output_reg) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- AddressingMode address_mode = AddressingModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned && address_mode != kMode_Root) {
- UseScratchRegisterScope temps(codegen->tasm());
- Register address = temps.AcquireX();
- switch (address_mode) {
- case kMode_MRI: // Fall through.
- case kMode_MRR:
- codegen->tasm()->Add(address, i->InputRegister(0), i->InputOperand(1));
- break;
- case kMode_Operand2_R_LSL_I:
- codegen->tasm()->Add(address, i->InputRegister(0),
- i->InputOperand2_64(1));
- break;
- default:
- // Note: we don't need poisoning for kMode_Root loads as those loads
- // target a fixed offset from root register which is set once when
- // initializing the vm.
- UNREACHABLE();
- }
- codegen->tasm()->And(address, address, Operand(kSpeculationPoisonRegister));
- codegen->tasm()->Ldr(output_reg, MemOperand(address));
- } else {
- codegen->tasm()->Ldr(output_reg, i->MemoryOperand());
- }
-}
-
// Handles unary ops that work for float (scalar), double (scalar), or NEON.
template <typename Fn>
void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
@@ -714,29 +672,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Bind(&not_deoptimized);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.AcquireX();
-
- // Set a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ ComputeCodeStartAddress(scratch);
- __ Cmp(kJavaScriptCallCodeStartRegister, scratch);
- __ Csetm(kSpeculationPoisonRegister, eq);
- __ Csdb();
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.AcquireX();
-
- __ Mov(scratch, sp);
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(scratch, scratch, kSpeculationPoisonRegister);
- __ Mov(sp, scratch);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1034,6 +969,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bind(ool->exit());
break;
}
+ case kArchAtomicStoreWithWriteBarrier: {
+ DCHECK_EQ(AddressingModeField::decode(instr->opcode()), kMode_MRR);
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register offset = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ auto ool = zone()->New<OutOfLineRecordWrite>(
+ this, object, offset, value, mode, DetermineStubCallMode(),
+ &unwinding_info_writer_);
+ __ AtomicStoreTaggedField(value, object, offset, i.TempRegister(0));
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
+ __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
+ eq, ool->entry());
+ __ Bind(ool->exit());
+ break;
+ }
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
@@ -1232,6 +1186,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).Format(src_f));
break;
}
+ case kArm64ISplat: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ Register src = LaneSizeField::decode(opcode) == 64 ? i.InputRegister64(0)
+ : i.InputRegister32(0);
+ __ Dup(i.OutputSimd128Register().Format(f), src);
+ break;
+ }
+ case kArm64FSplat: {
+ VectorFormat src_f =
+ ScalarFormatFromLaneSize(LaneSizeField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(src_f);
+ __ Dup(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f), 0);
+ break;
+ }
+ case kArm64Smlal: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidth(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Smlal(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
+ case kArm64Smlal2: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Smlal2(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
case kArm64Smull: {
if (instr->InputAt(0)->IsRegister()) {
__ Smull(i.OutputRegister(), i.InputRegister32(0),
@@ -1254,6 +1241,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1).Format(src_f));
break;
}
+ case kArm64Umlal: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidth(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Umlal(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
+ case kArm64Umlal2: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Umlal2(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
case kArm64Umull: {
if (instr->InputAt(0)->IsRegister()) {
__ Umull(i.OutputRegister(), i.InputRegister32(0),
@@ -1551,6 +1556,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Cmn32:
__ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
+ case kArm64Cnt32: {
+ __ PopcntHelper(i.OutputRegister32(), i.InputRegister32(0));
+ break;
+ }
+ case kArm64Cnt64: {
+ __ PopcntHelper(i.OutputRegister64(), i.InputRegister64(0));
+ break;
+ }
case kArm64Cnt: {
VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
__ Cnt(i.OutputSimd128Register().Format(f),
@@ -1814,12 +1827,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrb:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsb:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrsbW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1832,12 +1843,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrh:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsh:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrshW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1850,12 +1859,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrsw:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64StrW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1864,19 +1871,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldr:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressTaggedSigned:
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressTaggedPointer:
__ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressAnyTagged:
__ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kArm64LdarDecompressTaggedSigned:
+ __ AtomicDecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
+ break;
+ case kArm64LdarDecompressTaggedPointer:
+ __ AtomicDecompressTaggedPointer(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
+ break;
+ case kArm64LdarDecompressAnyTagged:
+ __ AtomicDecompressAnyTagged(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
break;
case kArm64Str:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1885,9 +1900,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrCompressTagged:
__ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
+ case kArm64StlrCompressTagged:
+ // To be consistent with other STLR instructions, the value is stored at
+ // the 3rd input register instead of the 1st.
+ __ AtomicStoreTaggedField(i.InputRegister(2), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
+ break;
case kArm64LdrS:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister().S());
+ __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
break;
case kArm64StrS:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1895,7 +1916,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64LdrD:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister());
+ __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kArm64StrD:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1916,117 +1937,100 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Dsb(FullSystem, BarrierAll);
__ Isb();
break;
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(0), i.InputRegister(0),
- Operand(kSpeculationPoisonRegister));
- break;
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicLoadUint8:
- case kArm64Word64AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh, Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicLoadUint16:
- case kArm64Word64AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh, Register32);
break;
- case kWord32AtomicLoadWord32:
- case kArm64Word64AtomicLoadUint32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar, Register32);
break;
case kArm64Word64AtomicLoadUint64:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar, Register);
break;
- case kWord32AtomicStoreWord8:
- case kArm64Word64AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrb, Register32);
break;
- case kWord32AtomicStoreWord16:
- case kArm64Word64AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrh, Register32);
break;
- case kWord32AtomicStoreWord32:
- case kArm64Word64AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register32);
break;
case kArm64Word64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint8:
- case kArm64Word64AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint16:
- case kArm64Word64AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
break;
- case kWord32AtomicExchangeWord32:
- case kArm64Word64AtomicExchangeUint32:
+ case kAtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register32);
break;
case kArm64Word64AtomicExchangeUint64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint8:
- case kArm64Word64AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
Register32);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint16:
- case kArm64Word64AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
Register32);
break;
- case kWord32AtomicCompareExchangeWord32:
- case kArm64Word64AtomicCompareExchangeUint32:
+ case kAtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW, Register32);
break;
case kArm64Word64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTX, Register);
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint8: \
- case kArm64Word64Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
__ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint16: \
- case kArm64Word64Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
break; \
- case kWord32Atomic##op##Word32: \
- case kArm64Word64Atomic##op##Uint32: \
+ case kAtomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst, Register32); \
break; \
case kArm64Word64Atomic##op##Uint64: \
@@ -2052,12 +2056,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Instr(i.OutputSimd128Register().V##FORMAT(), \
i.InputSimd128Register(0).V##FORMAT()); \
break;
+#define SIMD_UNOP_LANE_SIZE_CASE(Op, Instr) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ __ Instr(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(0).Format(f)); \
+ break; \
+ }
#define SIMD_BINOP_CASE(Op, Instr, FORMAT) \
case Op: \
__ Instr(i.OutputSimd128Register().V##FORMAT(), \
i.InputSimd128Register(0).V##FORMAT(), \
i.InputSimd128Register(1).V##FORMAT()); \
break;
+#define SIMD_BINOP_LANE_SIZE_CASE(Op, Instr) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ __ Instr(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(0).Format(f), \
+ i.InputSimd128Register(1).Format(f)); \
+ break; \
+ }
#define SIMD_DESTRUCTIVE_BINOP_CASE(Op, Instr, FORMAT) \
case Op: { \
VRegister dst = i.OutputSimd128Register().V##FORMAT(); \
@@ -2066,7 +2085,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(2).V##FORMAT()); \
break; \
}
-
+#define SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(Op, Instr) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ VRegister dst = i.OutputSimd128Register().Format(f); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f)); \
+ __ Instr(dst, i.InputSimd128Register(1).Format(f), \
+ i.InputSimd128Register(2).Format(f)); \
+ break; \
+ }
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FMin, Fmin);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FMax, Fmax);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64FAbs, Fabs);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64FSqrt, Fsqrt);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FAdd, Fadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FSub, Fsub);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FMul, Fmul);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FDiv, Fdiv);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64FNeg, Fneg);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64IAbs, Abs);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64INeg, Neg);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64RoundingAverageU, Urhadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMinS, Smin);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMaxS, Smax);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMinU, Umin);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMaxU, Umax);
+ SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(kArm64Mla, Mla);
+ SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(kArm64Mls, Mls);
case kArm64Sxtl: {
VectorFormat wide = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat narrow = VectorFormatHalfWidth(wide);
@@ -2129,49 +2174,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).V2S());
break;
}
- case kArm64F64x2Splat: {
- __ Dup(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).D(), 0);
+ case kArm64FExtractLane: {
+ VectorFormat dst_f =
+ ScalarFormatFromLaneSize(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatFillQ(dst_f);
+ __ Mov(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f), i.InputInt8(1));
break;
}
- case kArm64F64x2ExtractLane: {
- __ Mov(i.OutputSimd128Register().D(), i.InputSimd128Register(0).V2D(),
- i.InputInt8(1));
- break;
- }
- case kArm64F64x2ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V2D(),
- src1 = i.InputSimd128Register(0).V2D();
+ case kArm64FReplaceLane: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f),
+ src1 = i.InputSimd128Register(0).Format(f);
if (dst != src1) {
__ Mov(dst, src1);
}
- __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V2D(), 0);
- break;
- }
- SIMD_UNOP_CASE(kArm64F64x2Abs, Fabs, 2D);
- SIMD_UNOP_CASE(kArm64F64x2Neg, Fneg, 2D);
- SIMD_UNOP_CASE(kArm64F64x2Sqrt, Fsqrt, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Add, Fadd, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Sub, Fsub, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Mul, Fmul, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Div, Fdiv, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Min, Fmin, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Max, Fmax, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Eq, Fcmeq, 2D);
- case kArm64F64x2Ne: {
- VRegister dst = i.OutputSimd128Register().V2D();
- __ Fcmeq(dst, i.InputSimd128Register(0).V2D(),
- i.InputSimd128Register(1).V2D());
+ __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).Format(f), 0);
+ break;
+ }
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FEq, Fcmeq);
+ case kArm64FNe: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ __ Fcmeq(dst, i.InputSimd128Register(0).Format(f),
+ i.InputSimd128Register(1).Format(f));
__ Mvn(dst, dst);
break;
}
- case kArm64F64x2Lt: {
- __ Fcmgt(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
- i.InputSimd128Register(0).V2D());
+ case kArm64FLt: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ __ Fcmgt(i.OutputSimd128Register().Format(f),
+ i.InputSimd128Register(1).Format(f),
+ i.InputSimd128Register(0).Format(f));
break;
}
- case kArm64F64x2Le: {
- __ Fcmge(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
- i.InputSimd128Register(0).V2D());
+ case kArm64FLe: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ __ Fcmge(i.OutputSimd128Register().Format(f),
+ i.InputSimd128Register(1).Format(f),
+ i.InputSimd128Register(0).Format(f));
break;
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfma, Fmla, 2D);
@@ -2197,63 +2238,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
break;
}
- case kArm64F32x4Splat: {
- __ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
- break;
- }
- case kArm64F32x4ExtractLane: {
- __ Mov(i.OutputSimd128Register().S(), i.InputSimd128Register(0).V4S(),
- i.InputInt8(1));
- break;
- }
- case kArm64F32x4ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V4S(),
- src1 = i.InputSimd128Register(0).V4S();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V4S(), 0);
- break;
- }
SIMD_UNOP_CASE(kArm64F32x4SConvertI32x4, Scvtf, 4S);
SIMD_UNOP_CASE(kArm64F32x4UConvertI32x4, Ucvtf, 4S);
- SIMD_UNOP_CASE(kArm64F32x4Abs, Fabs, 4S);
- SIMD_UNOP_CASE(kArm64F32x4Neg, Fneg, 4S);
- SIMD_UNOP_CASE(kArm64F32x4Sqrt, Fsqrt, 4S);
SIMD_UNOP_CASE(kArm64F32x4RecipApprox, Frecpe, 4S);
SIMD_UNOP_CASE(kArm64F32x4RecipSqrtApprox, Frsqrte, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Add, Fadd, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Sub, Fsub, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Mul, Fmul, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Div, Fdiv, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Min, Fmin, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Max, Fmax, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Eq, Fcmeq, 4S);
- case kArm64F32x4MulElement: {
- __ Fmul(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
- i.InputSimd128Register(1).S(), i.InputInt8(2));
- break;
- }
- case kArm64F64x2MulElement: {
- __ Fmul(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
- i.InputSimd128Register(1).D(), i.InputInt8(2));
- break;
- }
- case kArm64F32x4Ne: {
- VRegister dst = i.OutputSimd128Register().V4S();
- __ Fcmeq(dst, i.InputSimd128Register(0).V4S(),
- i.InputSimd128Register(1).V4S());
- __ Mvn(dst, dst);
- break;
- }
- case kArm64F32x4Lt: {
- __ Fcmgt(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
- i.InputSimd128Register(0).V4S());
- break;
- }
- case kArm64F32x4Le: {
- __ Fcmge(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
- i.InputSimd128Register(0).V4S());
+ case kArm64FMulElement: {
+ VectorFormat s_f =
+ ScalarFormatFromLaneSize(LaneSizeField::decode(opcode));
+ VectorFormat v_f = VectorFormatFillQ(s_f);
+ __ Fmul(i.OutputSimd128Register().Format(v_f),
+ i.InputSimd128Register(0).Format(v_f),
+ i.InputSimd128Register(1).Format(s_f), i.InputInt8(2));
break;
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfma, Fmla, 4S);
@@ -2279,26 +2274,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
break;
}
- case kArm64I64x2Splat: {
- __ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0));
- break;
- }
- case kArm64I64x2ExtractLane: {
- __ Mov(i.OutputRegister64(), i.InputSimd128Register(0).V2D(),
- i.InputInt8(1));
+ case kArm64IExtractLane: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ Register dst =
+ f == kFormat2D ? i.OutputRegister64() : i.OutputRegister32();
+ __ Mov(dst, i.InputSimd128Register(0).Format(f), i.InputInt8(1));
break;
}
- case kArm64I64x2ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V2D(),
- src1 = i.InputSimd128Register(0).V2D();
+ case kArm64IReplaceLane: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f),
+ src1 = i.InputSimd128Register(0).Format(f);
+ Register src2 =
+ f == kFormat2D ? i.InputRegister64(2) : i.InputRegister32(2);
if (dst != src1) {
__ Mov(dst, src1);
}
- __ Mov(dst, i.InputInt8(1), i.InputRegister64(2));
+ __ Mov(dst, i.InputInt8(1), src2);
break;
}
- SIMD_UNOP_CASE(kArm64I64x2Abs, Abs, 2D);
- SIMD_UNOP_CASE(kArm64I64x2Neg, Neg, 2D);
case kArm64I64x2Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 6, V2D, Sshl, X);
break;
@@ -2307,8 +2301,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT_RIGHT(Sshr, 6, V2D, Sshl, X);
break;
}
- SIMD_BINOP_CASE(kArm64I64x2Add, Add, 2D);
- SIMD_BINOP_CASE(kArm64I64x2Sub, Sub, 2D);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IAdd, Add);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64ISub, Sub);
case kArm64I64x2Mul: {
UseScratchRegisterScope scope(tasm());
VRegister dst = i.OutputSimd128Register();
@@ -2368,16 +2362,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
- SIMD_BINOP_CASE(kArm64I64x2Eq, Cmeq, 2D);
- case kArm64I64x2Ne: {
- VRegister dst = i.OutputSimd128Register().V2D();
- __ Cmeq(dst, i.InputSimd128Register(0).V2D(),
- i.InputSimd128Register(1).V2D());
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IEq, Cmeq);
+ case kArm64INe: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ __ Cmeq(dst, i.InputSimd128Register(0).Format(f),
+ i.InputSimd128Register(1).Format(f));
__ Mvn(dst, dst);
break;
}
- SIMD_BINOP_CASE(kArm64I64x2GtS, Cmgt, 2D);
- SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtS, Cmgt);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeS, Cmge);
case kArm64I64x2ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 6, V2D, Ushl, X);
break;
@@ -2386,26 +2381,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ I64x2BitMask(i.OutputRegister32(), i.InputSimd128Register(0));
break;
}
- case kArm64I32x4Splat: {
- __ Dup(i.OutputSimd128Register().V4S(), i.InputRegister32(0));
- break;
- }
- case kArm64I32x4ExtractLane: {
- __ Mov(i.OutputRegister32(), i.InputSimd128Register(0).V4S(),
- i.InputInt8(1));
- break;
- }
- case kArm64I32x4ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V4S(),
- src1 = i.InputSimd128Register(0).V4S();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
- break;
- }
SIMD_UNOP_CASE(kArm64I32x4SConvertF32x4, Fcvtzs, 4S);
- SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S);
case kArm64I32x4Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 5, V4S, Sshl, W);
break;
@@ -2414,33 +2390,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT_RIGHT(Sshr, 5, V4S, Sshl, W);
break;
}
- SIMD_BINOP_CASE(kArm64I32x4Add, Add, 4S);
- SIMD_BINOP_CASE(kArm64I32x4Sub, Sub, 4S);
SIMD_BINOP_CASE(kArm64I32x4Mul, Mul, 4S);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I32x4Mla, Mla, 4S);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I32x4Mls, Mls, 4S);
- SIMD_BINOP_CASE(kArm64I32x4MinS, Smin, 4S);
- SIMD_BINOP_CASE(kArm64I32x4MaxS, Smax, 4S);
- SIMD_BINOP_CASE(kArm64I32x4Eq, Cmeq, 4S);
- case kArm64I32x4Ne: {
- VRegister dst = i.OutputSimd128Register().V4S();
- __ Cmeq(dst, i.InputSimd128Register(0).V4S(),
- i.InputSimd128Register(1).V4S());
- __ Mvn(dst, dst);
- break;
- }
- SIMD_BINOP_CASE(kArm64I32x4GtS, Cmgt, 4S);
- SIMD_BINOP_CASE(kArm64I32x4GeS, Cmge, 4S);
SIMD_UNOP_CASE(kArm64I32x4UConvertF32x4, Fcvtzu, 4S);
case kArm64I32x4ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 5, V4S, Ushl, W);
break;
}
- SIMD_BINOP_CASE(kArm64I32x4MinU, Umin, 4S);
- SIMD_BINOP_CASE(kArm64I32x4MaxU, Umax, 4S);
- SIMD_BINOP_CASE(kArm64I32x4GtU, Cmhi, 4S);
- SIMD_BINOP_CASE(kArm64I32x4GeU, Cmhs, 4S);
- SIMD_UNOP_CASE(kArm64I32x4Abs, Abs, 4S);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtU, Cmhi);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeU, Cmhs);
case kArm64I32x4BitMask: {
UseScratchRegisterScope scope(tasm());
Register dst = i.OutputRegister32();
@@ -2468,30 +2425,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Addp(i.OutputSimd128Register().V4S(), tmp1, tmp2);
break;
}
- case kArm64I16x8Splat: {
- __ Dup(i.OutputSimd128Register().V8H(), i.InputRegister32(0));
- break;
- }
- case kArm64I16x8ExtractLaneU: {
- __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
+ case kArm64IExtractLaneU: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).Format(f),
i.InputInt8(1));
break;
}
- case kArm64I16x8ExtractLaneS: {
- __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
+ case kArm64IExtractLaneS: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).Format(f),
i.InputInt8(1));
break;
}
- case kArm64I16x8ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V8H(),
- src1 = i.InputSimd128Register(0).V8H();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
- break;
- }
- SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H);
case kArm64I16x8Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 4, V8H, Sshl, W);
break;
@@ -2514,25 +2459,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtn2(dst.V8H(), src1.V4S());
break;
}
- SIMD_BINOP_CASE(kArm64I16x8Add, Add, 8H);
- SIMD_BINOP_CASE(kArm64I16x8AddSatS, Sqadd, 8H);
- SIMD_BINOP_CASE(kArm64I16x8Sub, Sub, 8H);
- SIMD_BINOP_CASE(kArm64I16x8SubSatS, Sqsub, 8H);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IAddSatS, Sqadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatS, Sqsub);
SIMD_BINOP_CASE(kArm64I16x8Mul, Mul, 8H);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I16x8Mla, Mla, 8H);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I16x8Mls, Mls, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MinS, Smin, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MaxS, Smax, 8H);
- SIMD_BINOP_CASE(kArm64I16x8Eq, Cmeq, 8H);
- case kArm64I16x8Ne: {
- VRegister dst = i.OutputSimd128Register().V8H();
- __ Cmeq(dst, i.InputSimd128Register(0).V8H(),
- i.InputSimd128Register(1).V8H());
- __ Mvn(dst, dst);
- break;
- }
- SIMD_BINOP_CASE(kArm64I16x8GtS, Cmgt, 8H);
- SIMD_BINOP_CASE(kArm64I16x8GeS, Cmge, 8H);
case kArm64I16x8ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 4, V8H, Ushl, W);
break;
@@ -2551,15 +2480,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtun2(dst.V8H(), src1.V4S());
break;
}
- SIMD_BINOP_CASE(kArm64I16x8AddSatU, Uqadd, 8H);
- SIMD_BINOP_CASE(kArm64I16x8SubSatU, Uqsub, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MinU, Umin, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MaxU, Umax, 8H);
- SIMD_BINOP_CASE(kArm64I16x8GtU, Cmhi, 8H);
- SIMD_BINOP_CASE(kArm64I16x8GeU, Cmhs, 8H);
- SIMD_BINOP_CASE(kArm64I16x8RoundingAverageU, Urhadd, 8H);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IAddSatU, Uqadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatU, Uqsub);
SIMD_BINOP_CASE(kArm64I16x8Q15MulRSatS, Sqrdmulh, 8H);
- SIMD_UNOP_CASE(kArm64I16x8Abs, Abs, 8H);
case kArm64I16x8BitMask: {
UseScratchRegisterScope scope(tasm());
Register dst = i.OutputRegister32();
@@ -2576,30 +2499,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mov(dst.W(), tmp.V8H(), 0);
break;
}
- case kArm64I8x16Splat: {
- __ Dup(i.OutputSimd128Register().V16B(), i.InputRegister32(0));
- break;
- }
- case kArm64I8x16ExtractLaneU: {
- __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
- i.InputInt8(1));
- break;
- }
- case kArm64I8x16ExtractLaneS: {
- __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
- i.InputInt8(1));
- break;
- }
- case kArm64I8x16ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V16B(),
- src1 = i.InputSimd128Register(0).V16B();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
- break;
- }
- SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B);
case kArm64I8x16Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 3, V16B, Sshl, W);
break;
@@ -2622,24 +2521,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtn2(dst.V16B(), src1.V8H());
break;
}
- SIMD_BINOP_CASE(kArm64I8x16Add, Add, 16B);
- SIMD_BINOP_CASE(kArm64I8x16AddSatS, Sqadd, 16B);
- SIMD_BINOP_CASE(kArm64I8x16Sub, Sub, 16B);
- SIMD_BINOP_CASE(kArm64I8x16SubSatS, Sqsub, 16B);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mla, Mla, 16B);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mls, Mls, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MinS, Smin, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MaxS, Smax, 16B);
- SIMD_BINOP_CASE(kArm64I8x16Eq, Cmeq, 16B);
- case kArm64I8x16Ne: {
- VRegister dst = i.OutputSimd128Register().V16B();
- __ Cmeq(dst, i.InputSimd128Register(0).V16B(),
- i.InputSimd128Register(1).V16B());
- __ Mvn(dst, dst);
- break;
- }
- SIMD_BINOP_CASE(kArm64I8x16GtS, Cmgt, 16B);
- SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B);
case kArm64I8x16ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 3, V16B, Ushl, W);
break;
@@ -2658,14 +2539,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtun2(dst.V16B(), src1.V8H());
break;
}
- SIMD_BINOP_CASE(kArm64I8x16AddSatU, Uqadd, 16B);
- SIMD_BINOP_CASE(kArm64I8x16SubSatU, Uqsub, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MinU, Umin, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MaxU, Umax, 16B);
- SIMD_BINOP_CASE(kArm64I8x16GtU, Cmhi, 16B);
- SIMD_BINOP_CASE(kArm64I8x16GeU, Cmhs, 16B);
- SIMD_BINOP_CASE(kArm64I8x16RoundingAverageU, Urhadd, 16B);
- SIMD_UNOP_CASE(kArm64I8x16Abs, Abs, 16B);
case kArm64I8x16BitMask: {
UseScratchRegisterScope scope(tasm());
Register dst = i.OutputRegister32();
@@ -2716,12 +2589,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
default:
UNREACHABLE();
- break;
}
break;
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64S128Select, Bsl, 16B);
SIMD_BINOP_CASE(kArm64S128AndNot, Bic, 16B);
+ case kArm64Ssra: {
+ int8_t laneSize = LaneSizeField::decode(opcode);
+ VectorFormat f = VectorFormatFillQ(laneSize);
+ int8_t mask = laneSize - 1;
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f));
+ __ Ssra(dst, i.InputSimd128Register(1).Format(f), i.InputInt8(2) & mask);
+ break;
+ }
+ case kArm64Usra: {
+ int8_t laneSize = LaneSizeField::decode(opcode);
+ VectorFormat f = VectorFormatFillQ(laneSize);
+ int8_t mask = laneSize - 1;
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f));
+ __ Usra(dst, i.InputSimd128Register(1).Format(f), i.InputUint8(2) & mask);
+ break;
+ }
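The two Ssra/Usra cases above mask the shift immediate with laneSize - 1 because SIMD shift counts are defined modulo the lane width in bits; the instructions then perform a per-lane arithmetic or logical shift right and accumulate into the destination register. The standalone scalar sketch below models that behaviour; the names, fixed 32-bit lane width and sample values are chosen here for illustration and are not part of the patch.

#include <cstdint>
#include <cstdio>

// Scalar model of SSRA/USRA lane semantics with the same shift masking as
// `i.InputInt8(2) & mask` above (illustrative only).
int32_t SsraLane(int32_t acc, int32_t src, unsigned shift) {
  const unsigned kLaneBits = 32;
  shift &= kLaneBits - 1;       // shift count is taken modulo the lane width
  return acc + (src >> shift);  // arithmetic shift right, then accumulate
}

uint32_t UsraLane(uint32_t acc, uint32_t src, unsigned shift) {
  const unsigned kLaneBits = 32;
  shift &= kLaneBits - 1;       // e.g. a shift of 36 wraps to 4
  return acc + (src >> shift);  // logical shift right, then accumulate
}

int main() {
  std::printf("%d\n", SsraLane(1, -256, 4));    // prints -15
  std::printf("%u\n", UsraLane(1u, 256u, 36));  // prints 17 (36 & 31 == 4)
}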
case kArm64S32x4Shuffle: {
Simd128Register dst = i.OutputSimd128Register().V4S(),
src0 = i.InputSimd128Register(0).V4S(),
@@ -2892,8 +2782,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#undef SIMD_UNOP_CASE
+#undef SIMD_UNOP_LANE_SIZE_CASE
#undef SIMD_BINOP_CASE
+#undef SIMD_BINOP_LANE_SIZE_CASE
#undef SIMD_DESTRUCTIVE_BINOP_CASE
+#undef SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE
#undef SIMD_REDUCE_OP_CASE
#undef ASSEMBLE_SIMD_SHIFT_LEFT
#undef ASSEMBLE_SIMD_SHIFT_RIGHT
@@ -2907,7 +2800,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
ArchOpcode opcode = instr->arch_opcode();
if (opcode == kArm64CompareAndBranch32) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister32(0), tlabel);
@@ -2919,7 +2811,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64CompareAndBranch) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister64(0), tlabel);
@@ -2931,7 +2822,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch32) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
@@ -2943,7 +2833,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
@@ -2961,19 +2850,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ CmovX(kSpeculationPoisonRegister, xzr,
- FlagsConditionToCondition(condition));
- __ Csdb();
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3143,7 +3019,6 @@ void CodeGenerator::AssembleConstructFrame() {
// arguments count was pushed.
required_slots -=
unoptimized_frame_slots - TurboAssembler::kExtraSlotClaimedByPrologue;
- ResetSpeculationPoison();
}
#if V8_ENABLE_WEBASSEMBLY
@@ -3343,7 +3218,9 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// number of arguments is given by max(1 + argc_reg, parameter_slots).
Label argc_reg_has_final_count;
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
- __ Add(argc_reg, argc_reg, 1); // Consider the receiver.
+ if (!kJSArgcIncludesReceiver) {
+ __ Add(argc_reg, argc_reg, 1); // Consider the receiver.
+ }
if (parameter_slots > 1) {
__ Cmp(argc_reg, Operand(parameter_slots));
__ B(&argc_reg_has_final_count, ge);
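The code-generator changes above, together with the opcode list rewrite that follows, collapse many per-shape SIMD opcodes (the I8x16/I16x8/I32x4/I64x2 and F32x4/F64x2 variants) into single lane-size-parameterised opcodes such as kArm64IGtS and kArm64FAdd; the lane size travels inside the instruction code and is turned back into a vector format with VectorFormatFillQ when the instruction is emitted. The following self-contained sketch shows that encode/decode round trip in miniature; the bit layout, opcode constant and helper names are illustrative only, not V8's actual field definitions.

#include <cstdint>
#include <cstdio>

// Standalone sketch of a lane-size-parameterised instruction code. V8 uses
// its own LaneSizeField bit field inside InstructionCode; this layout is
// made up for the example.
using InstructionCode = uint32_t;

constexpr InstructionCode EncodeLaneSize(InstructionCode opcode, unsigned lane_bits) {
  return opcode | (lane_bits << 8);  // hypothetical layout: opcode in bits 0-7
}

constexpr unsigned DecodeLaneSize(InstructionCode code) { return (code >> 8) & 0xff; }

// Maps a lane size in bits to the 128-bit ("fill Q") vector format suffix,
// mirroring what VectorFormatFillQ does in the patch.
const char* FormatFillQ(unsigned lane_bits) {
  switch (lane_bits) {
    case 8:  return "16b";
    case 16: return "8h";
    case 32: return "4s";
    case 64: return "2d";
    default: return "?";
  }
}

int main() {
  constexpr InstructionCode kIGtS = 0x2a;              // illustrative value
  InstructionCode code = EncodeLaneSize(kIGtS, 32);    // the I32x4 flavour
  const char* f = FormatFillQ(DecodeLaneSize(code));
  std::printf("cmgt v0.%s, v1.%s, v2.%s\n", f, f, f);  // cmgt v0.4s, v1.4s, v2.4s
}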
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 3f2e6151b6..d57203639e 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -11,423 +11,337 @@ namespace compiler {
// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Arm64Add) \
- V(Arm64Add32) \
- V(Arm64And) \
- V(Arm64And32) \
- V(Arm64Bic) \
- V(Arm64Bic32) \
- V(Arm64Clz) \
- V(Arm64Clz32) \
- V(Arm64Cmp) \
- V(Arm64Cmp32) \
- V(Arm64Cmn) \
- V(Arm64Cmn32) \
- V(Arm64Cnt) \
- V(Arm64Tst) \
- V(Arm64Tst32) \
- V(Arm64Or) \
- V(Arm64Or32) \
- V(Arm64Orn) \
- V(Arm64Orn32) \
- V(Arm64Eor) \
- V(Arm64Eor32) \
- V(Arm64Eon) \
- V(Arm64Eon32) \
- V(Arm64Sadalp) \
- V(Arm64Saddlp) \
- V(Arm64Sub) \
- V(Arm64Sub32) \
- V(Arm64Mul) \
- V(Arm64Mul32) \
- V(Arm64Smull) \
- V(Arm64Smull2) \
- V(Arm64Uadalp) \
- V(Arm64Uaddlp) \
- V(Arm64Umull) \
- V(Arm64Umull2) \
- V(Arm64Madd) \
- V(Arm64Madd32) \
- V(Arm64Msub) \
- V(Arm64Msub32) \
- V(Arm64Mneg) \
- V(Arm64Mneg32) \
- V(Arm64Idiv) \
- V(Arm64Idiv32) \
- V(Arm64Udiv) \
- V(Arm64Udiv32) \
- V(Arm64Imod) \
- V(Arm64Imod32) \
- V(Arm64Umod) \
- V(Arm64Umod32) \
- V(Arm64Not) \
- V(Arm64Not32) \
- V(Arm64Lsl) \
- V(Arm64Lsl32) \
- V(Arm64Lsr) \
- V(Arm64Lsr32) \
- V(Arm64Asr) \
- V(Arm64Asr32) \
- V(Arm64Ror) \
- V(Arm64Ror32) \
- V(Arm64Mov32) \
- V(Arm64Sxtb32) \
- V(Arm64Sxth32) \
- V(Arm64Sxtb) \
- V(Arm64Sxth) \
- V(Arm64Sxtw) \
- V(Arm64Sbfx) \
- V(Arm64Sbfx32) \
- V(Arm64Ubfx) \
- V(Arm64Ubfx32) \
- V(Arm64Ubfiz32) \
- V(Arm64Bfi) \
- V(Arm64Rbit) \
- V(Arm64Rbit32) \
- V(Arm64Rev) \
- V(Arm64Rev32) \
- V(Arm64TestAndBranch32) \
- V(Arm64TestAndBranch) \
- V(Arm64CompareAndBranch32) \
- V(Arm64CompareAndBranch) \
- V(Arm64Claim) \
- V(Arm64Poke) \
- V(Arm64PokePair) \
- V(Arm64Peek) \
- V(Arm64Float32Cmp) \
- V(Arm64Float32Add) \
- V(Arm64Float32Sub) \
- V(Arm64Float32Mul) \
- V(Arm64Float32Div) \
- V(Arm64Float32Abs) \
- V(Arm64Float32Abd) \
- V(Arm64Float32Neg) \
- V(Arm64Float32Sqrt) \
- V(Arm64Float32Fnmul) \
- V(Arm64Float32RoundDown) \
- V(Arm64Float32Max) \
- V(Arm64Float32Min) \
- V(Arm64Float64Cmp) \
- V(Arm64Float64Add) \
- V(Arm64Float64Sub) \
- V(Arm64Float64Mul) \
- V(Arm64Float64Div) \
- V(Arm64Float64Mod) \
- V(Arm64Float64Max) \
- V(Arm64Float64Min) \
- V(Arm64Float64Abs) \
- V(Arm64Float64Abd) \
- V(Arm64Float64Neg) \
- V(Arm64Float64Sqrt) \
- V(Arm64Float64Fnmul) \
- V(Arm64Float64RoundDown) \
- V(Arm64Float32RoundUp) \
- V(Arm64Float64RoundUp) \
- V(Arm64Float64RoundTiesAway) \
- V(Arm64Float32RoundTruncate) \
- V(Arm64Float64RoundTruncate) \
- V(Arm64Float32RoundTiesEven) \
- V(Arm64Float64RoundTiesEven) \
- V(Arm64Float64SilenceNaN) \
- V(Arm64Float32ToFloat64) \
- V(Arm64Float64ToFloat32) \
- V(Arm64Float32ToInt32) \
- V(Arm64Float64ToInt32) \
- V(Arm64Float32ToUint32) \
- V(Arm64Float64ToUint32) \
- V(Arm64Float32ToInt64) \
- V(Arm64Float64ToInt64) \
- V(Arm64Float32ToUint64) \
- V(Arm64Float64ToUint64) \
- V(Arm64Int32ToFloat32) \
- V(Arm64Int32ToFloat64) \
- V(Arm64Int64ToFloat32) \
- V(Arm64Int64ToFloat64) \
- V(Arm64Uint32ToFloat32) \
- V(Arm64Uint32ToFloat64) \
- V(Arm64Uint64ToFloat32) \
- V(Arm64Uint64ToFloat64) \
- V(Arm64Float64ExtractLowWord32) \
- V(Arm64Float64ExtractHighWord32) \
- V(Arm64Float64InsertLowWord32) \
- V(Arm64Float64InsertHighWord32) \
- V(Arm64Float64MoveU64) \
- V(Arm64U64MoveFloat64) \
- V(Arm64LdrS) \
- V(Arm64StrS) \
- V(Arm64LdrD) \
- V(Arm64StrD) \
- V(Arm64LdrQ) \
- V(Arm64StrQ) \
- V(Arm64Ldrb) \
- V(Arm64Ldrsb) \
- V(Arm64LdrsbW) \
- V(Arm64Strb) \
- V(Arm64Ldrh) \
- V(Arm64Ldrsh) \
- V(Arm64LdrshW) \
- V(Arm64Strh) \
- V(Arm64Ldrsw) \
- V(Arm64LdrW) \
- V(Arm64StrW) \
- V(Arm64Ldr) \
- V(Arm64LdrDecompressTaggedSigned) \
- V(Arm64LdrDecompressTaggedPointer) \
- V(Arm64LdrDecompressAnyTagged) \
- V(Arm64Str) \
- V(Arm64StrCompressTagged) \
- V(Arm64DmbIsh) \
- V(Arm64DsbIsb) \
- V(Arm64Sxtl) \
- V(Arm64Sxtl2) \
- V(Arm64Uxtl) \
- V(Arm64Uxtl2) \
- V(Arm64F64x2Splat) \
- V(Arm64F64x2ExtractLane) \
- V(Arm64F64x2ReplaceLane) \
- V(Arm64F64x2Abs) \
- V(Arm64F64x2Neg) \
- V(Arm64F64x2Sqrt) \
- V(Arm64F64x2Add) \
- V(Arm64F64x2Sub) \
- V(Arm64F64x2Mul) \
- V(Arm64F64x2MulElement) \
- V(Arm64F64x2Div) \
- V(Arm64F64x2Min) \
- V(Arm64F64x2Max) \
- V(Arm64F64x2Eq) \
- V(Arm64F64x2Ne) \
- V(Arm64F64x2Lt) \
- V(Arm64F64x2Le) \
- V(Arm64F64x2Qfma) \
- V(Arm64F64x2Qfms) \
- V(Arm64F64x2Pmin) \
- V(Arm64F64x2Pmax) \
- V(Arm64F64x2ConvertLowI32x4S) \
- V(Arm64F64x2ConvertLowI32x4U) \
- V(Arm64F64x2PromoteLowF32x4) \
- V(Arm64F32x4Splat) \
- V(Arm64F32x4ExtractLane) \
- V(Arm64F32x4ReplaceLane) \
- V(Arm64F32x4SConvertI32x4) \
- V(Arm64F32x4UConvertI32x4) \
- V(Arm64F32x4Abs) \
- V(Arm64F32x4Neg) \
- V(Arm64F32x4Sqrt) \
- V(Arm64F32x4RecipApprox) \
- V(Arm64F32x4RecipSqrtApprox) \
- V(Arm64F32x4Add) \
- V(Arm64F32x4Sub) \
- V(Arm64F32x4Mul) \
- V(Arm64F32x4MulElement) \
- V(Arm64F32x4Div) \
- V(Arm64F32x4Min) \
- V(Arm64F32x4Max) \
- V(Arm64F32x4Eq) \
- V(Arm64F32x4Ne) \
- V(Arm64F32x4Lt) \
- V(Arm64F32x4Le) \
- V(Arm64F32x4Qfma) \
- V(Arm64F32x4Qfms) \
- V(Arm64F32x4Pmin) \
- V(Arm64F32x4Pmax) \
- V(Arm64F32x4DemoteF64x2Zero) \
- V(Arm64I64x2Splat) \
- V(Arm64I64x2ExtractLane) \
- V(Arm64I64x2ReplaceLane) \
- V(Arm64I64x2Abs) \
- V(Arm64I64x2Neg) \
- V(Arm64I64x2Shl) \
- V(Arm64I64x2ShrS) \
- V(Arm64I64x2Add) \
- V(Arm64I64x2Sub) \
- V(Arm64I64x2Mul) \
- V(Arm64I64x2Eq) \
- V(Arm64I64x2Ne) \
- V(Arm64I64x2GtS) \
- V(Arm64I64x2GeS) \
- V(Arm64I64x2ShrU) \
- V(Arm64I64x2BitMask) \
- V(Arm64I32x4Splat) \
- V(Arm64I32x4ExtractLane) \
- V(Arm64I32x4ReplaceLane) \
- V(Arm64I32x4SConvertF32x4) \
- V(Arm64I32x4Neg) \
- V(Arm64I32x4Shl) \
- V(Arm64I32x4ShrS) \
- V(Arm64I32x4Add) \
- V(Arm64I32x4Sub) \
- V(Arm64I32x4Mul) \
- V(Arm64I32x4Mla) \
- V(Arm64I32x4Mls) \
- V(Arm64I32x4MinS) \
- V(Arm64I32x4MaxS) \
- V(Arm64I32x4Eq) \
- V(Arm64I32x4Ne) \
- V(Arm64I32x4GtS) \
- V(Arm64I32x4GeS) \
- V(Arm64I32x4UConvertF32x4) \
- V(Arm64I32x4ShrU) \
- V(Arm64I32x4MinU) \
- V(Arm64I32x4MaxU) \
- V(Arm64I32x4GtU) \
- V(Arm64I32x4GeU) \
- V(Arm64I32x4Abs) \
- V(Arm64I32x4BitMask) \
- V(Arm64I32x4DotI16x8S) \
- V(Arm64I32x4TruncSatF64x2SZero) \
- V(Arm64I32x4TruncSatF64x2UZero) \
- V(Arm64I16x8Splat) \
- V(Arm64I16x8ExtractLaneU) \
- V(Arm64I16x8ExtractLaneS) \
- V(Arm64I16x8ReplaceLane) \
- V(Arm64I16x8Neg) \
- V(Arm64I16x8Shl) \
- V(Arm64I16x8ShrS) \
- V(Arm64I16x8SConvertI32x4) \
- V(Arm64I16x8Add) \
- V(Arm64I16x8AddSatS) \
- V(Arm64I16x8Sub) \
- V(Arm64I16x8SubSatS) \
- V(Arm64I16x8Mul) \
- V(Arm64I16x8Mla) \
- V(Arm64I16x8Mls) \
- V(Arm64I16x8MinS) \
- V(Arm64I16x8MaxS) \
- V(Arm64I16x8Eq) \
- V(Arm64I16x8Ne) \
- V(Arm64I16x8GtS) \
- V(Arm64I16x8GeS) \
- V(Arm64I16x8ShrU) \
- V(Arm64I16x8UConvertI32x4) \
- V(Arm64I16x8AddSatU) \
- V(Arm64I16x8SubSatU) \
- V(Arm64I16x8MinU) \
- V(Arm64I16x8MaxU) \
- V(Arm64I16x8GtU) \
- V(Arm64I16x8GeU) \
- V(Arm64I16x8RoundingAverageU) \
- V(Arm64I16x8Q15MulRSatS) \
- V(Arm64I16x8Abs) \
- V(Arm64I16x8BitMask) \
- V(Arm64I8x16Splat) \
- V(Arm64I8x16ExtractLaneU) \
- V(Arm64I8x16ExtractLaneS) \
- V(Arm64I8x16ReplaceLane) \
- V(Arm64I8x16Neg) \
- V(Arm64I8x16Shl) \
- V(Arm64I8x16ShrS) \
- V(Arm64I8x16SConvertI16x8) \
- V(Arm64I8x16Add) \
- V(Arm64I8x16AddSatS) \
- V(Arm64I8x16Sub) \
- V(Arm64I8x16SubSatS) \
- V(Arm64I8x16Mla) \
- V(Arm64I8x16Mls) \
- V(Arm64I8x16MinS) \
- V(Arm64I8x16MaxS) \
- V(Arm64I8x16Eq) \
- V(Arm64I8x16Ne) \
- V(Arm64I8x16GtS) \
- V(Arm64I8x16GeS) \
- V(Arm64I8x16ShrU) \
- V(Arm64I8x16UConvertI16x8) \
- V(Arm64I8x16AddSatU) \
- V(Arm64I8x16SubSatU) \
- V(Arm64I8x16MinU) \
- V(Arm64I8x16MaxU) \
- V(Arm64I8x16GtU) \
- V(Arm64I8x16GeU) \
- V(Arm64I8x16RoundingAverageU) \
- V(Arm64I8x16Abs) \
- V(Arm64I8x16BitMask) \
- V(Arm64S128Const) \
- V(Arm64S128Zero) \
- V(Arm64S128Dup) \
- V(Arm64S128And) \
- V(Arm64S128Or) \
- V(Arm64S128Xor) \
- V(Arm64S128Not) \
- V(Arm64S128Select) \
- V(Arm64S128AndNot) \
- V(Arm64S32x4ZipLeft) \
- V(Arm64S32x4ZipRight) \
- V(Arm64S32x4UnzipLeft) \
- V(Arm64S32x4UnzipRight) \
- V(Arm64S32x4TransposeLeft) \
- V(Arm64S32x4TransposeRight) \
- V(Arm64S32x4Shuffle) \
- V(Arm64S16x8ZipLeft) \
- V(Arm64S16x8ZipRight) \
- V(Arm64S16x8UnzipLeft) \
- V(Arm64S16x8UnzipRight) \
- V(Arm64S16x8TransposeLeft) \
- V(Arm64S16x8TransposeRight) \
- V(Arm64S8x16ZipLeft) \
- V(Arm64S8x16ZipRight) \
- V(Arm64S8x16UnzipLeft) \
- V(Arm64S8x16UnzipRight) \
- V(Arm64S8x16TransposeLeft) \
- V(Arm64S8x16TransposeRight) \
- V(Arm64S8x16Concat) \
- V(Arm64I8x16Swizzle) \
- V(Arm64I8x16Shuffle) \
- V(Arm64S32x2Reverse) \
- V(Arm64S16x4Reverse) \
- V(Arm64S16x2Reverse) \
- V(Arm64S8x8Reverse) \
- V(Arm64S8x4Reverse) \
- V(Arm64S8x2Reverse) \
- V(Arm64V128AnyTrue) \
- V(Arm64I64x2AllTrue) \
- V(Arm64I32x4AllTrue) \
- V(Arm64I16x8AllTrue) \
- V(Arm64I8x16AllTrue) \
- V(Arm64LoadSplat) \
- V(Arm64LoadLane) \
- V(Arm64StoreLane) \
- V(Arm64S128Load8x8S) \
- V(Arm64S128Load8x8U) \
- V(Arm64S128Load16x4S) \
- V(Arm64S128Load16x4U) \
- V(Arm64S128Load32x2S) \
- V(Arm64S128Load32x2U) \
- V(Arm64Word64AtomicLoadUint8) \
- V(Arm64Word64AtomicLoadUint16) \
- V(Arm64Word64AtomicLoadUint32) \
- V(Arm64Word64AtomicLoadUint64) \
- V(Arm64Word64AtomicStoreWord8) \
- V(Arm64Word64AtomicStoreWord16) \
- V(Arm64Word64AtomicStoreWord32) \
- V(Arm64Word64AtomicStoreWord64) \
- V(Arm64Word64AtomicAddUint8) \
- V(Arm64Word64AtomicAddUint16) \
- V(Arm64Word64AtomicAddUint32) \
- V(Arm64Word64AtomicAddUint64) \
- V(Arm64Word64AtomicSubUint8) \
- V(Arm64Word64AtomicSubUint16) \
- V(Arm64Word64AtomicSubUint32) \
- V(Arm64Word64AtomicSubUint64) \
- V(Arm64Word64AtomicAndUint8) \
- V(Arm64Word64AtomicAndUint16) \
- V(Arm64Word64AtomicAndUint32) \
- V(Arm64Word64AtomicAndUint64) \
- V(Arm64Word64AtomicOrUint8) \
- V(Arm64Word64AtomicOrUint16) \
- V(Arm64Word64AtomicOrUint32) \
- V(Arm64Word64AtomicOrUint64) \
- V(Arm64Word64AtomicXorUint8) \
- V(Arm64Word64AtomicXorUint16) \
- V(Arm64Word64AtomicXorUint32) \
- V(Arm64Word64AtomicXorUint64) \
- V(Arm64Word64AtomicExchangeUint8) \
- V(Arm64Word64AtomicExchangeUint16) \
- V(Arm64Word64AtomicExchangeUint32) \
- V(Arm64Word64AtomicExchangeUint64) \
- V(Arm64Word64AtomicCompareExchangeUint8) \
- V(Arm64Word64AtomicCompareExchangeUint16) \
- V(Arm64Word64AtomicCompareExchangeUint32) \
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Arm64Add) \
+ V(Arm64Add32) \
+ V(Arm64And) \
+ V(Arm64And32) \
+ V(Arm64Bic) \
+ V(Arm64Bic32) \
+ V(Arm64Clz) \
+ V(Arm64Clz32) \
+ V(Arm64Cmp) \
+ V(Arm64Cmp32) \
+ V(Arm64Cmn) \
+ V(Arm64Cmn32) \
+ V(Arm64Cnt) \
+ V(Arm64Cnt32) \
+ V(Arm64Cnt64) \
+ V(Arm64Tst) \
+ V(Arm64Tst32) \
+ V(Arm64Or) \
+ V(Arm64Or32) \
+ V(Arm64Orn) \
+ V(Arm64Orn32) \
+ V(Arm64Eor) \
+ V(Arm64Eor32) \
+ V(Arm64Eon) \
+ V(Arm64Eon32) \
+ V(Arm64Sadalp) \
+ V(Arm64Saddlp) \
+ V(Arm64Sub) \
+ V(Arm64Sub32) \
+ V(Arm64Mul) \
+ V(Arm64Mul32) \
+ V(Arm64Smlal) \
+ V(Arm64Smlal2) \
+ V(Arm64Smull) \
+ V(Arm64Smull2) \
+ V(Arm64Uadalp) \
+ V(Arm64Uaddlp) \
+ V(Arm64Umlal) \
+ V(Arm64Umlal2) \
+ V(Arm64Umull) \
+ V(Arm64Umull2) \
+ V(Arm64Madd) \
+ V(Arm64Madd32) \
+ V(Arm64Msub) \
+ V(Arm64Msub32) \
+ V(Arm64Mneg) \
+ V(Arm64Mneg32) \
+ V(Arm64Idiv) \
+ V(Arm64Idiv32) \
+ V(Arm64Udiv) \
+ V(Arm64Udiv32) \
+ V(Arm64Imod) \
+ V(Arm64Imod32) \
+ V(Arm64Umod) \
+ V(Arm64Umod32) \
+ V(Arm64Not) \
+ V(Arm64Not32) \
+ V(Arm64Lsl) \
+ V(Arm64Lsl32) \
+ V(Arm64Lsr) \
+ V(Arm64Lsr32) \
+ V(Arm64Asr) \
+ V(Arm64Asr32) \
+ V(Arm64Ror) \
+ V(Arm64Ror32) \
+ V(Arm64Mov32) \
+ V(Arm64Sxtb32) \
+ V(Arm64Sxth32) \
+ V(Arm64Sxtb) \
+ V(Arm64Sxth) \
+ V(Arm64Sxtw) \
+ V(Arm64Sbfx) \
+ V(Arm64Sbfx32) \
+ V(Arm64Ubfx) \
+ V(Arm64Ubfx32) \
+ V(Arm64Ubfiz32) \
+ V(Arm64Bfi) \
+ V(Arm64Rbit) \
+ V(Arm64Rbit32) \
+ V(Arm64Rev) \
+ V(Arm64Rev32) \
+ V(Arm64TestAndBranch32) \
+ V(Arm64TestAndBranch) \
+ V(Arm64CompareAndBranch32) \
+ V(Arm64CompareAndBranch) \
+ V(Arm64Claim) \
+ V(Arm64Poke) \
+ V(Arm64PokePair) \
+ V(Arm64Peek) \
+ V(Arm64Float32Cmp) \
+ V(Arm64Float32Add) \
+ V(Arm64Float32Sub) \
+ V(Arm64Float32Mul) \
+ V(Arm64Float32Div) \
+ V(Arm64Float32Abs) \
+ V(Arm64Float32Abd) \
+ V(Arm64Float32Neg) \
+ V(Arm64Float32Sqrt) \
+ V(Arm64Float32Fnmul) \
+ V(Arm64Float32RoundDown) \
+ V(Arm64Float32Max) \
+ V(Arm64Float32Min) \
+ V(Arm64Float64Cmp) \
+ V(Arm64Float64Add) \
+ V(Arm64Float64Sub) \
+ V(Arm64Float64Mul) \
+ V(Arm64Float64Div) \
+ V(Arm64Float64Mod) \
+ V(Arm64Float64Max) \
+ V(Arm64Float64Min) \
+ V(Arm64Float64Abs) \
+ V(Arm64Float64Abd) \
+ V(Arm64Float64Neg) \
+ V(Arm64Float64Sqrt) \
+ V(Arm64Float64Fnmul) \
+ V(Arm64Float64RoundDown) \
+ V(Arm64Float32RoundUp) \
+ V(Arm64Float64RoundUp) \
+ V(Arm64Float64RoundTiesAway) \
+ V(Arm64Float32RoundTruncate) \
+ V(Arm64Float64RoundTruncate) \
+ V(Arm64Float32RoundTiesEven) \
+ V(Arm64Float64RoundTiesEven) \
+ V(Arm64Float64SilenceNaN) \
+ V(Arm64Float32ToFloat64) \
+ V(Arm64Float64ToFloat32) \
+ V(Arm64Float32ToInt32) \
+ V(Arm64Float64ToInt32) \
+ V(Arm64Float32ToUint32) \
+ V(Arm64Float64ToUint32) \
+ V(Arm64Float32ToInt64) \
+ V(Arm64Float64ToInt64) \
+ V(Arm64Float32ToUint64) \
+ V(Arm64Float64ToUint64) \
+ V(Arm64Int32ToFloat32) \
+ V(Arm64Int32ToFloat64) \
+ V(Arm64Int64ToFloat32) \
+ V(Arm64Int64ToFloat64) \
+ V(Arm64Uint32ToFloat32) \
+ V(Arm64Uint32ToFloat64) \
+ V(Arm64Uint64ToFloat32) \
+ V(Arm64Uint64ToFloat64) \
+ V(Arm64Float64ExtractLowWord32) \
+ V(Arm64Float64ExtractHighWord32) \
+ V(Arm64Float64InsertLowWord32) \
+ V(Arm64Float64InsertHighWord32) \
+ V(Arm64Float64MoveU64) \
+ V(Arm64U64MoveFloat64) \
+ V(Arm64LdrS) \
+ V(Arm64StrS) \
+ V(Arm64LdrD) \
+ V(Arm64StrD) \
+ V(Arm64LdrQ) \
+ V(Arm64StrQ) \
+ V(Arm64Ldrb) \
+ V(Arm64Ldrsb) \
+ V(Arm64LdrsbW) \
+ V(Arm64Strb) \
+ V(Arm64Ldrh) \
+ V(Arm64Ldrsh) \
+ V(Arm64LdrshW) \
+ V(Arm64Strh) \
+ V(Arm64Ldrsw) \
+ V(Arm64LdrW) \
+ V(Arm64StrW) \
+ V(Arm64Ldr) \
+ V(Arm64LdrDecompressTaggedSigned) \
+ V(Arm64LdrDecompressTaggedPointer) \
+ V(Arm64LdrDecompressAnyTagged) \
+ V(Arm64LdarDecompressTaggedSigned) \
+ V(Arm64LdarDecompressTaggedPointer) \
+ V(Arm64LdarDecompressAnyTagged) \
+ V(Arm64Str) \
+ V(Arm64StrCompressTagged) \
+ V(Arm64StlrCompressTagged) \
+ V(Arm64DmbIsh) \
+ V(Arm64DsbIsb) \
+ V(Arm64Sxtl) \
+ V(Arm64Sxtl2) \
+ V(Arm64Uxtl) \
+ V(Arm64Uxtl2) \
+ V(Arm64FSplat) \
+ V(Arm64FAbs) \
+ V(Arm64FSqrt) \
+ V(Arm64FNeg) \
+ V(Arm64FExtractLane) \
+ V(Arm64FReplaceLane) \
+ V(Arm64FAdd) \
+ V(Arm64FSub) \
+ V(Arm64FMul) \
+ V(Arm64FMulElement) \
+ V(Arm64FDiv) \
+ V(Arm64FMin) \
+ V(Arm64FMax) \
+ V(Arm64FEq) \
+ V(Arm64FNe) \
+ V(Arm64FLt) \
+ V(Arm64FLe) \
+ V(Arm64F64x2Qfma) \
+ V(Arm64F64x2Qfms) \
+ V(Arm64F64x2Pmin) \
+ V(Arm64F64x2Pmax) \
+ V(Arm64F64x2ConvertLowI32x4S) \
+ V(Arm64F64x2ConvertLowI32x4U) \
+ V(Arm64F64x2PromoteLowF32x4) \
+ V(Arm64F32x4SConvertI32x4) \
+ V(Arm64F32x4UConvertI32x4) \
+ V(Arm64F32x4RecipApprox) \
+ V(Arm64F32x4RecipSqrtApprox) \
+ V(Arm64F32x4Qfma) \
+ V(Arm64F32x4Qfms) \
+ V(Arm64F32x4Pmin) \
+ V(Arm64F32x4Pmax) \
+ V(Arm64F32x4DemoteF64x2Zero) \
+ V(Arm64ISplat) \
+ V(Arm64IAbs) \
+ V(Arm64INeg) \
+ V(Arm64IExtractLane) \
+ V(Arm64IReplaceLane) \
+ V(Arm64I64x2Shl) \
+ V(Arm64I64x2ShrS) \
+ V(Arm64IAdd) \
+ V(Arm64ISub) \
+ V(Arm64I64x2Mul) \
+ V(Arm64IEq) \
+ V(Arm64INe) \
+ V(Arm64IGtS) \
+ V(Arm64IGeS) \
+ V(Arm64I64x2ShrU) \
+ V(Arm64I64x2BitMask) \
+ V(Arm64I32x4SConvertF32x4) \
+ V(Arm64I32x4Shl) \
+ V(Arm64I32x4ShrS) \
+ V(Arm64I32x4Mul) \
+ V(Arm64Mla) \
+ V(Arm64Mls) \
+ V(Arm64IMinS) \
+ V(Arm64IMaxS) \
+ V(Arm64I32x4UConvertF32x4) \
+ V(Arm64I32x4ShrU) \
+ V(Arm64IMinU) \
+ V(Arm64IMaxU) \
+ V(Arm64IGtU) \
+ V(Arm64IGeU) \
+ V(Arm64I32x4BitMask) \
+ V(Arm64I32x4DotI16x8S) \
+ V(Arm64I32x4TruncSatF64x2SZero) \
+ V(Arm64I32x4TruncSatF64x2UZero) \
+ V(Arm64IExtractLaneU) \
+ V(Arm64IExtractLaneS) \
+ V(Arm64I16x8Shl) \
+ V(Arm64I16x8ShrS) \
+ V(Arm64I16x8SConvertI32x4) \
+ V(Arm64IAddSatS) \
+ V(Arm64ISubSatS) \
+ V(Arm64I16x8Mul) \
+ V(Arm64I16x8ShrU) \
+ V(Arm64I16x8UConvertI32x4) \
+ V(Arm64IAddSatU) \
+ V(Arm64ISubSatU) \
+ V(Arm64RoundingAverageU) \
+ V(Arm64I16x8Q15MulRSatS) \
+ V(Arm64I16x8BitMask) \
+ V(Arm64I8x16Shl) \
+ V(Arm64I8x16ShrS) \
+ V(Arm64I8x16SConvertI16x8) \
+ V(Arm64I8x16ShrU) \
+ V(Arm64I8x16UConvertI16x8) \
+ V(Arm64I8x16BitMask) \
+ V(Arm64S128Const) \
+ V(Arm64S128Zero) \
+ V(Arm64S128Dup) \
+ V(Arm64S128And) \
+ V(Arm64S128Or) \
+ V(Arm64S128Xor) \
+ V(Arm64S128Not) \
+ V(Arm64S128Select) \
+ V(Arm64S128AndNot) \
+ V(Arm64Ssra) \
+ V(Arm64Usra) \
+ V(Arm64S32x4ZipLeft) \
+ V(Arm64S32x4ZipRight) \
+ V(Arm64S32x4UnzipLeft) \
+ V(Arm64S32x4UnzipRight) \
+ V(Arm64S32x4TransposeLeft) \
+ V(Arm64S32x4TransposeRight) \
+ V(Arm64S32x4Shuffle) \
+ V(Arm64S16x8ZipLeft) \
+ V(Arm64S16x8ZipRight) \
+ V(Arm64S16x8UnzipLeft) \
+ V(Arm64S16x8UnzipRight) \
+ V(Arm64S16x8TransposeLeft) \
+ V(Arm64S16x8TransposeRight) \
+ V(Arm64S8x16ZipLeft) \
+ V(Arm64S8x16ZipRight) \
+ V(Arm64S8x16UnzipLeft) \
+ V(Arm64S8x16UnzipRight) \
+ V(Arm64S8x16TransposeLeft) \
+ V(Arm64S8x16TransposeRight) \
+ V(Arm64S8x16Concat) \
+ V(Arm64I8x16Swizzle) \
+ V(Arm64I8x16Shuffle) \
+ V(Arm64S32x2Reverse) \
+ V(Arm64S16x4Reverse) \
+ V(Arm64S16x2Reverse) \
+ V(Arm64S8x8Reverse) \
+ V(Arm64S8x4Reverse) \
+ V(Arm64S8x2Reverse) \
+ V(Arm64V128AnyTrue) \
+ V(Arm64I64x2AllTrue) \
+ V(Arm64I32x4AllTrue) \
+ V(Arm64I16x8AllTrue) \
+ V(Arm64I8x16AllTrue) \
+ V(Arm64LoadSplat) \
+ V(Arm64LoadLane) \
+ V(Arm64StoreLane) \
+ V(Arm64S128Load8x8S) \
+ V(Arm64S128Load8x8U) \
+ V(Arm64S128Load16x4S) \
+ V(Arm64S128Load16x4U) \
+ V(Arm64S128Load32x2S) \
+ V(Arm64S128Load32x2U) \
+ V(Arm64Word64AtomicLoadUint64) \
+ V(Arm64Word64AtomicStoreWord64) \
+ V(Arm64Word64AtomicAddUint64) \
+ V(Arm64Word64AtomicSubUint64) \
+ V(Arm64Word64AtomicAndUint64) \
+ V(Arm64Word64AtomicOrUint64) \
+ V(Arm64Word64AtomicXorUint64) \
+ V(Arm64Word64AtomicExchangeUint64) \
V(Arm64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
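For readers unfamiliar with the pattern, TARGET_ARCH_OPCODE_LIST is an X-macro: the backend expands it with different definitions of V to generate the ArchOpcode enumerators, their printable names, and the large switch statements seen in the scheduler and code generator. The minimal, self-contained illustration below shows the same pattern with a trimmed, simplified list; it is not V8's actual expansion.

#include <cstdio>

// One list, expanded twice: once to declare enumerators, once to build a
// matching name table.
#define DEMO_OPCODE_LIST(V) \
  V(Arm64Add)               \
  V(Arm64FSplat)            \
  V(Arm64IGtS)

enum DemoOpcode {
#define DECLARE_ENUM(Name) k##Name,
  DEMO_OPCODE_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
};

const char* const kDemoOpcodeNames[] = {
#define DECLARE_NAME(Name) #Name,
    DEMO_OPCODE_LIST(DECLARE_NAME)
#undef DECLARE_NAME
};

int main() {
  // The enumerator indexes the generated name table; prints "Arm64IGtS".
  std::printf("%s\n", kDemoOpcodeNames[kArm64IGtS]);
}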
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index f4446cdbf8..bb16b76aaf 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -26,6 +26,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Cmn:
case kArm64Cmn32:
case kArm64Cnt:
+ case kArm64Cnt32:
+ case kArm64Cnt64:
case kArm64Tst:
case kArm64Tst32:
case kArm64Or:
@@ -42,10 +44,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Sub32:
case kArm64Mul:
case kArm64Mul32:
+ case kArm64Smlal:
+ case kArm64Smlal2:
case kArm64Smull:
case kArm64Smull2:
case kArm64Uadalp:
case kArm64Uaddlp:
+ case kArm64Umlal:
+ case kArm64Umlal2:
case kArm64Umull:
case kArm64Umull2:
case kArm64Madd:
@@ -147,23 +153,23 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float64MoveU64:
case kArm64U64MoveFloat64:
case kArm64Float64SilenceNaN:
- case kArm64F64x2Splat:
- case kArm64F64x2ExtractLane:
- case kArm64F64x2ReplaceLane:
- case kArm64F64x2Abs:
- case kArm64F64x2Neg:
- case kArm64F64x2Sqrt:
- case kArm64F64x2Add:
- case kArm64F64x2Sub:
- case kArm64F64x2Mul:
- case kArm64F64x2MulElement:
- case kArm64F64x2Div:
- case kArm64F64x2Min:
- case kArm64F64x2Max:
- case kArm64F64x2Eq:
- case kArm64F64x2Ne:
- case kArm64F64x2Lt:
- case kArm64F64x2Le:
+ case kArm64FExtractLane:
+ case kArm64FReplaceLane:
+ case kArm64FSplat:
+ case kArm64FAbs:
+ case kArm64FSqrt:
+ case kArm64FNeg:
+ case kArm64FAdd:
+ case kArm64FSub:
+ case kArm64FMul:
+ case kArm64FMulElement:
+ case kArm64FDiv:
+ case kArm64FMin:
+ case kArm64FMax:
+ case kArm64FEq:
+ case kArm64FNe:
+ case kArm64FLt:
+ case kArm64FLe:
case kArm64F64x2Qfma:
case kArm64F64x2Qfms:
case kArm64F64x2Pmin:
@@ -171,144 +177,73 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2ConvertLowI32x4S:
case kArm64F64x2ConvertLowI32x4U:
case kArm64F64x2PromoteLowF32x4:
- case kArm64F32x4Splat:
- case kArm64F32x4ExtractLane:
- case kArm64F32x4ReplaceLane:
case kArm64F32x4SConvertI32x4:
case kArm64F32x4UConvertI32x4:
- case kArm64F32x4Abs:
- case kArm64F32x4Neg:
- case kArm64F32x4Sqrt:
case kArm64F32x4RecipApprox:
case kArm64F32x4RecipSqrtApprox:
- case kArm64F32x4Add:
- case kArm64F32x4Sub:
- case kArm64F32x4Mul:
- case kArm64F32x4MulElement:
- case kArm64F32x4Div:
- case kArm64F32x4Min:
- case kArm64F32x4Max:
- case kArm64F32x4Eq:
- case kArm64F32x4Ne:
- case kArm64F32x4Lt:
- case kArm64F32x4Le:
case kArm64F32x4Qfma:
case kArm64F32x4Qfms:
case kArm64F32x4Pmin:
case kArm64F32x4Pmax:
case kArm64F32x4DemoteF64x2Zero:
- case kArm64I64x2Splat:
- case kArm64I64x2ExtractLane:
- case kArm64I64x2ReplaceLane:
- case kArm64I64x2Abs:
- case kArm64I64x2Neg:
+ case kArm64IExtractLane:
+ case kArm64IReplaceLane:
+ case kArm64ISplat:
+ case kArm64IAbs:
+ case kArm64INeg:
+ case kArm64Mla:
+ case kArm64Mls:
+ case kArm64RoundingAverageU:
case kArm64I64x2Shl:
case kArm64I64x2ShrS:
- case kArm64I64x2Add:
- case kArm64I64x2Sub:
+ case kArm64IAdd:
+ case kArm64ISub:
case kArm64I64x2Mul:
- case kArm64I64x2Eq:
- case kArm64I64x2Ne:
- case kArm64I64x2GtS:
- case kArm64I64x2GeS:
+ case kArm64IEq:
+ case kArm64INe:
+ case kArm64IGtS:
+ case kArm64IGeS:
case kArm64I64x2ShrU:
case kArm64I64x2BitMask:
- case kArm64I32x4Splat:
- case kArm64I32x4ExtractLane:
- case kArm64I32x4ReplaceLane:
case kArm64I32x4SConvertF32x4:
case kArm64Sxtl:
case kArm64Sxtl2:
case kArm64Uxtl:
case kArm64Uxtl2:
- case kArm64I32x4Neg:
case kArm64I32x4Shl:
case kArm64I32x4ShrS:
- case kArm64I32x4Add:
- case kArm64I32x4Sub:
case kArm64I32x4Mul:
- case kArm64I32x4Mla:
- case kArm64I32x4Mls:
- case kArm64I32x4MinS:
- case kArm64I32x4MaxS:
- case kArm64I32x4Eq:
- case kArm64I32x4Ne:
- case kArm64I32x4GtS:
- case kArm64I32x4GeS:
+ case kArm64IMinS:
+ case kArm64IMaxS:
case kArm64I32x4UConvertF32x4:
case kArm64I32x4ShrU:
- case kArm64I32x4MinU:
- case kArm64I32x4MaxU:
- case kArm64I32x4GtU:
- case kArm64I32x4GeU:
- case kArm64I32x4Abs:
+ case kArm64IMinU:
+ case kArm64IMaxU:
+ case kArm64IGtU:
+ case kArm64IGeU:
case kArm64I32x4BitMask:
case kArm64I32x4DotI16x8S:
case kArm64I32x4TruncSatF64x2SZero:
case kArm64I32x4TruncSatF64x2UZero:
- case kArm64I16x8Splat:
- case kArm64I16x8ExtractLaneU:
- case kArm64I16x8ExtractLaneS:
- case kArm64I16x8ReplaceLane:
- case kArm64I16x8Neg:
+ case kArm64IExtractLaneU:
+ case kArm64IExtractLaneS:
case kArm64I16x8Shl:
case kArm64I16x8ShrS:
case kArm64I16x8SConvertI32x4:
- case kArm64I16x8Add:
- case kArm64I16x8AddSatS:
- case kArm64I16x8Sub:
- case kArm64I16x8SubSatS:
+ case kArm64IAddSatS:
+ case kArm64ISubSatS:
case kArm64I16x8Mul:
- case kArm64I16x8Mla:
- case kArm64I16x8Mls:
- case kArm64I16x8MinS:
- case kArm64I16x8MaxS:
- case kArm64I16x8Eq:
- case kArm64I16x8Ne:
- case kArm64I16x8GtS:
- case kArm64I16x8GeS:
case kArm64I16x8ShrU:
case kArm64I16x8UConvertI32x4:
- case kArm64I16x8AddSatU:
- case kArm64I16x8SubSatU:
- case kArm64I16x8MinU:
- case kArm64I16x8MaxU:
- case kArm64I16x8GtU:
- case kArm64I16x8GeU:
- case kArm64I16x8RoundingAverageU:
+ case kArm64IAddSatU:
+ case kArm64ISubSatU:
case kArm64I16x8Q15MulRSatS:
- case kArm64I16x8Abs:
case kArm64I16x8BitMask:
- case kArm64I8x16Splat:
- case kArm64I8x16ExtractLaneU:
- case kArm64I8x16ExtractLaneS:
- case kArm64I8x16ReplaceLane:
- case kArm64I8x16Neg:
case kArm64I8x16Shl:
case kArm64I8x16ShrS:
case kArm64I8x16SConvertI16x8:
- case kArm64I8x16Add:
- case kArm64I8x16AddSatS:
- case kArm64I8x16Sub:
- case kArm64I8x16SubSatS:
- case kArm64I8x16Mla:
- case kArm64I8x16Mls:
- case kArm64I8x16MinS:
- case kArm64I8x16MaxS:
- case kArm64I8x16Eq:
- case kArm64I8x16Ne:
- case kArm64I8x16GtS:
- case kArm64I8x16GeS:
case kArm64I8x16UConvertI16x8:
- case kArm64I8x16AddSatU:
- case kArm64I8x16SubSatU:
case kArm64I8x16ShrU:
- case kArm64I8x16MinU:
- case kArm64I8x16MaxU:
- case kArm64I8x16GtU:
- case kArm64I8x16GeU:
- case kArm64I8x16RoundingAverageU:
- case kArm64I8x16Abs:
case kArm64I8x16BitMask:
case kArm64S128Const:
case kArm64S128Zero:
@@ -319,6 +254,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S128Not:
case kArm64S128Select:
case kArm64S128AndNot:
+ case kArm64Ssra:
+ case kArm64Usra:
case kArm64S32x4ZipLeft:
case kArm64S32x4ZipRight:
case kArm64S32x4UnzipLeft:
@@ -373,6 +310,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64LdrDecompressTaggedSigned:
case kArm64LdrDecompressTaggedPointer:
case kArm64LdrDecompressAnyTagged:
+ case kArm64LdarDecompressTaggedSigned:
+ case kArm64LdarDecompressTaggedPointer:
+ case kArm64LdarDecompressAnyTagged:
case kArm64Peek:
case kArm64LoadSplat:
case kArm64LoadLane:
@@ -395,48 +335,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64StrW:
case kArm64Str:
case kArm64StrCompressTagged:
+ case kArm64StlrCompressTagged:
case kArm64DmbIsh:
case kArm64DsbIsb:
case kArm64StoreLane:
return kHasSideEffect;
- case kArm64Word64AtomicLoadUint8:
- case kArm64Word64AtomicLoadUint16:
- case kArm64Word64AtomicLoadUint32:
case kArm64Word64AtomicLoadUint64:
return kIsLoadOperation;
- case kArm64Word64AtomicStoreWord8:
- case kArm64Word64AtomicStoreWord16:
- case kArm64Word64AtomicStoreWord32:
case kArm64Word64AtomicStoreWord64:
- case kArm64Word64AtomicAddUint8:
- case kArm64Word64AtomicAddUint16:
- case kArm64Word64AtomicAddUint32:
case kArm64Word64AtomicAddUint64:
- case kArm64Word64AtomicSubUint8:
- case kArm64Word64AtomicSubUint16:
- case kArm64Word64AtomicSubUint32:
case kArm64Word64AtomicSubUint64:
- case kArm64Word64AtomicAndUint8:
- case kArm64Word64AtomicAndUint16:
- case kArm64Word64AtomicAndUint32:
case kArm64Word64AtomicAndUint64:
- case kArm64Word64AtomicOrUint8:
- case kArm64Word64AtomicOrUint16:
- case kArm64Word64AtomicOrUint32:
case kArm64Word64AtomicOrUint64:
- case kArm64Word64AtomicXorUint8:
- case kArm64Word64AtomicXorUint16:
- case kArm64Word64AtomicXorUint32:
case kArm64Word64AtomicXorUint64:
- case kArm64Word64AtomicExchangeUint8:
- case kArm64Word64AtomicExchangeUint16:
- case kArm64Word64AtomicExchangeUint32:
case kArm64Word64AtomicExchangeUint64:
- case kArm64Word64AtomicCompareExchangeUint8:
- case kArm64Word64AtomicCompareExchangeUint16:
- case kArm64Word64AtomicCompareExchangeUint32:
case kArm64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 6a1a101e35..d102ecabb2 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -190,7 +190,8 @@ void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
}
}
-void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+void VisitRRI(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
Arm64OperandGenerator g(selector);
int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -205,7 +206,8 @@ void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
g.UseOperand(node->InputAt(1), operand_mode));
}
-void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+void VisitRRIR(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
Arm64OperandGenerator g(selector);
int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -845,10 +847,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
if (node->opcode() == IrOpcode::kProtectedLoad) {
opcode |= AccessModeField::encode(kMemoryAccessProtected);
}
@@ -856,8 +854,6 @@ void InstructionSelector::VisitLoad(Node* node) {
EmitLoad(this, node, opcode, immediate_mode, rep);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitStore(Node* node) {
@@ -1441,6 +1437,8 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
#define RR_OP_LIST(V) \
V(Word64Clz, kArm64Clz) \
V(Word32Clz, kArm64Clz32) \
+ V(Word32Popcnt, kArm64Cnt32) \
+ V(Word64Popcnt, kArm64Cnt64) \
V(Word32ReverseBits, kArm64Rbit32) \
V(Word64ReverseBits, kArm64Rbit) \
V(Word32ReverseBytes, kArm64Rev32) \
@@ -1531,10 +1529,6 @@ void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
-
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1938,7 +1932,9 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
Node* value = node->InputAt(0);
- if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kLoadImmutable) &&
+ CanCover(node, value)) {
// Generate sign-extending load.
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
MachineRepresentation rep = load_rep.representation();
@@ -2324,9 +2320,6 @@ template <int N>
bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node,
typename CbzOrTbzMatchTrait<N>::IntegralType value,
Node* user, FlagsCondition cond, FlagsContinuation* cont) {
- // Branch poisoning requires flags to be set, so when it's enabled for
- // a particular branch, we shouldn't be applying the cbz/tbz optimization.
- DCHECK(!cont->IsPoisoned());
// Only handle branches and deoptimisations.
if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
@@ -2414,7 +2407,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
std::swap(left, right);
}
- if (opcode == kArm64Cmp && !cont->IsPoisoned()) {
+ if (opcode == kArm64Cmp) {
Int64Matcher m(right);
if (m.HasResolvedValue()) {
if (TryEmitCbzOrTbz<64>(selector, left, m.ResolvedValue(), node,
@@ -2432,19 +2425,17 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
FlagsCondition cond = cont->condition();
- if (!cont->IsPoisoned()) {
- if (m.right().HasResolvedValue()) {
- if (TryEmitCbzOrTbz<32>(selector, m.left().node(),
- m.right().ResolvedValue(), node, cond, cont)) {
- return;
- }
- } else if (m.left().HasResolvedValue()) {
- FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
- if (TryEmitCbzOrTbz<32>(selector, m.right().node(),
- m.left().ResolvedValue(), node, commuted_cond,
- cont)) {
- return;
- }
+ if (m.right().HasResolvedValue()) {
+ if (TryEmitCbzOrTbz<32>(selector, m.left().node(),
+ m.right().ResolvedValue(), node, cond, cont)) {
+ return;
+ }
+ } else if (m.left().HasResolvedValue()) {
+ FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
+ if (TryEmitCbzOrTbz<32>(selector, m.right().node(),
+ m.left().ResolvedValue(), node, commuted_cond,
+ cont)) {
+ return;
}
}
ArchOpcode opcode = kArm64Cmp32;
@@ -2533,8 +2524,7 @@ struct TestAndBranchMatcher {
Matcher matcher_;
void Initialize() {
- if (cont_->IsBranch() && !cont_->IsPoisoned() &&
- matcher_.right().HasResolvedValue() &&
+ if (cont_->IsBranch() && matcher_.right().HasResolvedValue() &&
base::bits::IsPowerOfTwo(matcher_.right().ResolvedValue())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont_->condition() == kEqual) ||
@@ -2583,7 +2573,7 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2592,13 +2582,14 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
g.UseUniqueRegister(value)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+ InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2609,40 +2600,149 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
g.UseUniqueRegister(new_value)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+ InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit LDAR.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ InstructionCode code;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicLoadWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ code = kArm64Word64AtomicLoadUint64;
+ break;
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ code = kArm64LdarDecompressTaggedSigned;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ code = kArm64LdarDecompressTaggedPointer;
+ break;
+ case MachineRepresentation::kTagged:
+ code = kArm64LdarDecompressAnyTagged;
+ break;
+#else
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ if (kTaggedSize == 8) {
+ code = kArm64Word64AtomicLoadUint64;
+ } else {
+ code = kAtomicLoadWord32;
+ }
+ break;
+#endif
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ DCHECK(COMPRESS_POINTERS_BOOL);
+ code = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ code |=
+ AddressingModeField::encode(kMode_MRR) | AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
void VisitAtomicStore(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
+
+ // The memory order is ignored as both release and sequentially consistent
+ // stores can emit STLR.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
+ MachineRepresentation rep = store_params.representation();
+
+ if (FLAG_enable_unconditional_write_barriers &&
+ CanBeTaggedOrCompressedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
g.UseUniqueRegister(value)};
InstructionOperand temps[] = {g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+ InstructionCode code;
+
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedOrCompressedPointer(rep));
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ } else {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ code = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ code = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ DCHECK_EQ(width, AtomicWidth::kWord64);
+ code = kArm64Word64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+ code = kArm64StlrCompressTagged;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ CHECK(COMPRESS_POINTERS_BOOL);
+ DCHECK_EQ(width, AtomicWidth::kWord32);
+ code = kArm64StlrCompressTagged;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ code |= AtomicWidthField::encode(width);
+ }
+
+ code |= AddressingModeField::encode(kMode_MRR);
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps),
temps);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2653,7 +2753,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
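The comments in VisitAtomicLoad and VisitAtomicStore above rely on the standard ARMv8 lowering of C++-style memory orders that the linked mapping page describes: acquire and sequentially consistent loads can both be emitted as LDAR, and release and sequentially consistent stores as STLR, so the selector only needs the representation and width, not the order. The ordinary std::atomic sketch below is included only to make that mapping concrete; on an ARM64 build each pair of functions typically compiles to the same LDAR- or STLR-based sequence, and none of it is part of the patch.

#include <atomic>
#include <cstdint>

// Illustration only: with the ARMv8 mapping referenced in the patch, acquire
// and seq_cst loads both lower to LDAR, release and seq_cst stores to STLR.
uint64_t LoadAcquire(const std::atomic<uint64_t>& a) {
  return a.load(std::memory_order_acquire);   // typically: ldar
}
uint64_t LoadSeqCst(const std::atomic<uint64_t>& a) {
  return a.load(std::memory_order_seq_cst);   // typically the same ldar sequence
}
void StoreRelease(std::atomic<uint64_t>& a, uint64_t v) {
  a.store(v, std::memory_order_release);      // typically: stlr
}
void StoreSeqCst(std::atomic<uint64_t>& a, uint64_t v) {
  a.store(v, std::memory_order_seq_cst);      // typically the same stlr sequence
}

int main() {
  std::atomic<uint64_t> a{42};
  StoreRelease(a, LoadAcquire(a) + 1);
  StoreSeqCst(a, LoadSeqCst(a) + 1);
  return static_cast<int>(a.load() & 0x7f);   // 44
}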
@@ -2842,7 +2943,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
}
// Branch could not be combined with a compare, compare against 0 and branch.
- if (!cont->IsPoisoned() && cont->IsBranch()) {
+ if (cont->IsBranch()) {
Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
g.UseRegister(value), g.Label(cont->true_block()),
g.Label(cont->false_block()));
@@ -3196,159 +3297,91 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = kArm64Word64AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kArm64Word64AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kArm64Word64AtomicLoadUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kArm64Word64AtomicLoadUint64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kArm64Word64AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kArm64Word64AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kArm64Word64AtomicStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kArm64Word64AtomicStoreWord64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kArm64Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kArm64Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kArm64Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kArm64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kArm64Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kArm64Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kArm64Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kArm64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
@@ -3369,15 +3402,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -3402,14 +3434,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kArm64Word64Atomic##op##Uint8, kArm64Word64Atomic##op##Uint16, \
- kArm64Word64Atomic##op##Uint32, kArm64Word64Atomic##op##Uint64); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kArm64Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -3426,44 +3458,22 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
-#define SIMD_TYPE_LIST(V) \
- V(F64x2) \
- V(F32x4) \
- V(I64x2) \
- V(I32x4) \
- V(I16x8) \
- V(I8x16)
-
#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs, kArm64F64x2Abs) \
- V(F64x2Neg, kArm64F64x2Neg) \
- V(F64x2Sqrt, kArm64F64x2Sqrt) \
V(F64x2ConvertLowI32x4S, kArm64F64x2ConvertLowI32x4S) \
V(F64x2ConvertLowI32x4U, kArm64F64x2ConvertLowI32x4U) \
V(F64x2PromoteLowF32x4, kArm64F64x2PromoteLowF32x4) \
V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
- V(F32x4Abs, kArm64F32x4Abs) \
- V(F32x4Neg, kArm64F32x4Neg) \
- V(F32x4Sqrt, kArm64F32x4Sqrt) \
V(F32x4RecipApprox, kArm64F32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
V(F32x4DemoteF64x2Zero, kArm64F32x4DemoteF64x2Zero) \
- V(I64x2Abs, kArm64I64x2Abs) \
- V(I64x2Neg, kArm64I64x2Neg) \
V(I64x2BitMask, kArm64I64x2BitMask) \
V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
- V(I32x4Neg, kArm64I32x4Neg) \
V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
- V(I32x4Abs, kArm64I32x4Abs) \
V(I32x4BitMask, kArm64I32x4BitMask) \
V(I32x4TruncSatF64x2SZero, kArm64I32x4TruncSatF64x2SZero) \
V(I32x4TruncSatF64x2UZero, kArm64I32x4TruncSatF64x2UZero) \
- V(I16x8Neg, kArm64I16x8Neg) \
- V(I16x8Abs, kArm64I16x8Abs) \
V(I16x8BitMask, kArm64I16x8BitMask) \
- V(I8x16Neg, kArm64I8x16Neg) \
- V(I8x16Abs, kArm64I8x16Abs) \
V(I8x16BitMask, kArm64I8x16BitMask) \
V(S128Not, kArm64S128Not) \
V(V128AnyTrue, kArm64V128AnyTrue) \
@@ -3472,6 +3482,28 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8AllTrue, kArm64I16x8AllTrue) \
V(I8x16AllTrue, kArm64I8x16AllTrue)
+#define SIMD_UNOP_LANE_SIZE_LIST(V) \
+ V(F64x2Splat, kArm64FSplat, 64) \
+ V(F64x2Abs, kArm64FAbs, 64) \
+ V(F64x2Sqrt, kArm64FSqrt, 64) \
+ V(F64x2Neg, kArm64FNeg, 64) \
+ V(F32x4Splat, kArm64FSplat, 32) \
+ V(F32x4Abs, kArm64FAbs, 32) \
+ V(F32x4Sqrt, kArm64FSqrt, 32) \
+ V(F32x4Neg, kArm64FNeg, 32) \
+ V(I64x2Splat, kArm64ISplat, 64) \
+ V(I64x2Abs, kArm64IAbs, 64) \
+ V(I64x2Neg, kArm64INeg, 64) \
+ V(I32x4Splat, kArm64ISplat, 32) \
+ V(I32x4Abs, kArm64IAbs, 32) \
+ V(I32x4Neg, kArm64INeg, 32) \
+ V(I16x8Splat, kArm64ISplat, 16) \
+ V(I16x8Abs, kArm64IAbs, 16) \
+ V(I16x8Neg, kArm64INeg, 16) \
+ V(I8x16Splat, kArm64ISplat, 8) \
+ V(I8x16Abs, kArm64IAbs, 8) \
+ V(I8x16Neg, kArm64INeg, 8)
+
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl, 64) \
V(I64x2ShrS, 64) \
@@ -3487,85 +3519,85 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16ShrU, 8)
#define SIMD_BINOP_LIST(V) \
- V(F64x2Add, kArm64F64x2Add) \
- V(F64x2Sub, kArm64F64x2Sub) \
- V(F64x2Div, kArm64F64x2Div) \
- V(F64x2Min, kArm64F64x2Min) \
- V(F64x2Max, kArm64F64x2Max) \
- V(F64x2Eq, kArm64F64x2Eq) \
- V(F64x2Ne, kArm64F64x2Ne) \
- V(F64x2Lt, kArm64F64x2Lt) \
- V(F64x2Le, kArm64F64x2Le) \
- V(F32x4Add, kArm64F32x4Add) \
- V(F32x4Sub, kArm64F32x4Sub) \
- V(F32x4Div, kArm64F32x4Div) \
- V(F32x4Min, kArm64F32x4Min) \
- V(F32x4Max, kArm64F32x4Max) \
- V(F32x4Eq, kArm64F32x4Eq) \
- V(F32x4Ne, kArm64F32x4Ne) \
- V(F32x4Lt, kArm64F32x4Lt) \
- V(F32x4Le, kArm64F32x4Le) \
- V(I64x2Add, kArm64I64x2Add) \
- V(I64x2Sub, kArm64I64x2Sub) \
- V(I64x2Eq, kArm64I64x2Eq) \
- V(I64x2Ne, kArm64I64x2Ne) \
- V(I64x2GtS, kArm64I64x2GtS) \
- V(I64x2GeS, kArm64I64x2GeS) \
V(I32x4Mul, kArm64I32x4Mul) \
- V(I32x4MinS, kArm64I32x4MinS) \
- V(I32x4MaxS, kArm64I32x4MaxS) \
- V(I32x4Eq, kArm64I32x4Eq) \
- V(I32x4Ne, kArm64I32x4Ne) \
- V(I32x4GtS, kArm64I32x4GtS) \
- V(I32x4GeS, kArm64I32x4GeS) \
- V(I32x4MinU, kArm64I32x4MinU) \
- V(I32x4MaxU, kArm64I32x4MaxU) \
- V(I32x4GtU, kArm64I32x4GtU) \
- V(I32x4GeU, kArm64I32x4GeU) \
V(I32x4DotI16x8S, kArm64I32x4DotI16x8S) \
V(I16x8SConvertI32x4, kArm64I16x8SConvertI32x4) \
- V(I16x8AddSatS, kArm64I16x8AddSatS) \
- V(I16x8SubSatS, kArm64I16x8SubSatS) \
V(I16x8Mul, kArm64I16x8Mul) \
- V(I16x8MinS, kArm64I16x8MinS) \
- V(I16x8MaxS, kArm64I16x8MaxS) \
- V(I16x8Eq, kArm64I16x8Eq) \
- V(I16x8Ne, kArm64I16x8Ne) \
- V(I16x8GtS, kArm64I16x8GtS) \
- V(I16x8GeS, kArm64I16x8GeS) \
V(I16x8UConvertI32x4, kArm64I16x8UConvertI32x4) \
- V(I16x8AddSatU, kArm64I16x8AddSatU) \
- V(I16x8SubSatU, kArm64I16x8SubSatU) \
- V(I16x8MinU, kArm64I16x8MinU) \
- V(I16x8MaxU, kArm64I16x8MaxU) \
- V(I16x8GtU, kArm64I16x8GtU) \
- V(I16x8GeU, kArm64I16x8GeU) \
- V(I16x8RoundingAverageU, kArm64I16x8RoundingAverageU) \
V(I16x8Q15MulRSatS, kArm64I16x8Q15MulRSatS) \
- V(I8x16Add, kArm64I8x16Add) \
- V(I8x16Sub, kArm64I8x16Sub) \
V(I8x16SConvertI16x8, kArm64I8x16SConvertI16x8) \
- V(I8x16AddSatS, kArm64I8x16AddSatS) \
- V(I8x16SubSatS, kArm64I8x16SubSatS) \
- V(I8x16MinS, kArm64I8x16MinS) \
- V(I8x16MaxS, kArm64I8x16MaxS) \
- V(I8x16Eq, kArm64I8x16Eq) \
- V(I8x16Ne, kArm64I8x16Ne) \
- V(I8x16GtS, kArm64I8x16GtS) \
- V(I8x16GeS, kArm64I8x16GeS) \
V(I8x16UConvertI16x8, kArm64I8x16UConvertI16x8) \
- V(I8x16AddSatU, kArm64I8x16AddSatU) \
- V(I8x16SubSatU, kArm64I8x16SubSatU) \
- V(I8x16MinU, kArm64I8x16MinU) \
- V(I8x16MaxU, kArm64I8x16MaxU) \
- V(I8x16GtU, kArm64I8x16GtU) \
- V(I8x16GeU, kArm64I8x16GeU) \
- V(I8x16RoundingAverageU, kArm64I8x16RoundingAverageU) \
V(S128And, kArm64S128And) \
V(S128Or, kArm64S128Or) \
V(S128Xor, kArm64S128Xor) \
V(S128AndNot, kArm64S128AndNot)
+#define SIMD_BINOP_LANE_SIZE_LIST(V) \
+ V(F64x2Min, kArm64FMin, 64) \
+ V(F64x2Max, kArm64FMax, 64) \
+ V(F64x2Add, kArm64FAdd, 64) \
+ V(F64x2Sub, kArm64FSub, 64) \
+ V(F64x2Div, kArm64FDiv, 64) \
+ V(F64x2Eq, kArm64FEq, 64) \
+ V(F64x2Ne, kArm64FNe, 64) \
+ V(F64x2Lt, kArm64FLt, 64) \
+ V(F64x2Le, kArm64FLe, 64) \
+ V(F32x4Min, kArm64FMin, 32) \
+ V(F32x4Max, kArm64FMax, 32) \
+ V(F32x4Add, kArm64FAdd, 32) \
+ V(F32x4Sub, kArm64FSub, 32) \
+ V(F32x4Div, kArm64FDiv, 32) \
+ V(F32x4Eq, kArm64FEq, 32) \
+ V(F32x4Ne, kArm64FNe, 32) \
+ V(F32x4Lt, kArm64FLt, 32) \
+ V(F32x4Le, kArm64FLe, 32) \
+ V(I64x2Sub, kArm64ISub, 64) \
+ V(I64x2Eq, kArm64IEq, 64) \
+ V(I64x2Ne, kArm64INe, 64) \
+ V(I64x2GtS, kArm64IGtS, 64) \
+ V(I64x2GeS, kArm64IGeS, 64) \
+ V(I32x4Eq, kArm64IEq, 32) \
+ V(I32x4Ne, kArm64INe, 32) \
+ V(I32x4GtS, kArm64IGtS, 32) \
+ V(I32x4GeS, kArm64IGeS, 32) \
+ V(I32x4GtU, kArm64IGtU, 32) \
+ V(I32x4GeU, kArm64IGeU, 32) \
+ V(I32x4MinS, kArm64IMinS, 32) \
+ V(I32x4MaxS, kArm64IMaxS, 32) \
+ V(I32x4MinU, kArm64IMinU, 32) \
+ V(I32x4MaxU, kArm64IMaxU, 32) \
+ V(I16x8AddSatS, kArm64IAddSatS, 16) \
+ V(I16x8SubSatS, kArm64ISubSatS, 16) \
+ V(I16x8AddSatU, kArm64IAddSatU, 16) \
+ V(I16x8SubSatU, kArm64ISubSatU, 16) \
+ V(I16x8Eq, kArm64IEq, 16) \
+ V(I16x8Ne, kArm64INe, 16) \
+ V(I16x8GtS, kArm64IGtS, 16) \
+ V(I16x8GeS, kArm64IGeS, 16) \
+ V(I16x8GtU, kArm64IGtU, 16) \
+ V(I16x8GeU, kArm64IGeU, 16) \
+ V(I16x8RoundingAverageU, kArm64RoundingAverageU, 16) \
+ V(I8x16RoundingAverageU, kArm64RoundingAverageU, 8) \
+ V(I16x8MinS, kArm64IMinS, 16) \
+ V(I16x8MaxS, kArm64IMaxS, 16) \
+ V(I16x8MinU, kArm64IMinU, 16) \
+ V(I16x8MaxU, kArm64IMaxU, 16) \
+ V(I8x16Sub, kArm64ISub, 8) \
+ V(I8x16AddSatS, kArm64IAddSatS, 8) \
+ V(I8x16SubSatS, kArm64ISubSatS, 8) \
+ V(I8x16AddSatU, kArm64IAddSatU, 8) \
+ V(I8x16SubSatU, kArm64ISubSatU, 8) \
+ V(I8x16Eq, kArm64IEq, 8) \
+ V(I8x16Ne, kArm64INe, 8) \
+ V(I8x16GtS, kArm64IGtS, 8) \
+ V(I8x16GeS, kArm64IGeS, 8) \
+ V(I8x16GtU, kArm64IGtU, 8) \
+ V(I8x16GeU, kArm64IGeU, 8) \
+ V(I8x16MinS, kArm64IMinS, 8) \
+ V(I8x16MaxS, kArm64IMaxS, 8) \
+ V(I8x16MinU, kArm64IMinU, 8) \
+ V(I8x16MaxU, kArm64IMaxU, 8)
+
void InstructionSelector::VisitS128Const(Node* node) {
Arm64OperandGenerator g(this);
static const int kUint32Immediates = 4;
@@ -3589,34 +3621,34 @@ void InstructionSelector::VisitS128Zero(Node* node) {
Emit(kArm64S128Zero, g.DefineAsRegister(node));
}
-#define SIMD_VISIT_SPLAT(Type) \
- void InstructionSelector::Visit##Type##Splat(Node* node) { \
- VisitRR(this, kArm64##Type##Splat, node); \
- }
-SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
-#undef SIMD_VISIT_SPLAT
-
-#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
- void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
- VisitRRI(this, kArm64##Type##ExtractLane##Sign, node); \
- }
-SIMD_VISIT_EXTRACT_LANE(F64x2, )
-SIMD_VISIT_EXTRACT_LANE(F32x4, )
-SIMD_VISIT_EXTRACT_LANE(I64x2, )
-SIMD_VISIT_EXTRACT_LANE(I32x4, )
-SIMD_VISIT_EXTRACT_LANE(I16x8, U)
-SIMD_VISIT_EXTRACT_LANE(I16x8, S)
-SIMD_VISIT_EXTRACT_LANE(I8x16, U)
-SIMD_VISIT_EXTRACT_LANE(I8x16, S)
+#define SIMD_VISIT_EXTRACT_LANE(Type, T, Sign, LaneSize) \
+ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
+ VisitRRI(this, \
+ kArm64##T##ExtractLane##Sign | LaneSizeField::encode(LaneSize), \
+ node); \
+ }
+SIMD_VISIT_EXTRACT_LANE(F64x2, F, , 64)
+SIMD_VISIT_EXTRACT_LANE(F32x4, F, , 32)
+SIMD_VISIT_EXTRACT_LANE(I64x2, I, , 64)
+SIMD_VISIT_EXTRACT_LANE(I32x4, I, , 32)
+SIMD_VISIT_EXTRACT_LANE(I16x8, I, U, 16)
+SIMD_VISIT_EXTRACT_LANE(I16x8, I, S, 16)
+SIMD_VISIT_EXTRACT_LANE(I8x16, I, U, 8)
+SIMD_VISIT_EXTRACT_LANE(I8x16, I, S, 8)
#undef SIMD_VISIT_EXTRACT_LANE
-#define SIMD_VISIT_REPLACE_LANE(Type) \
- void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
- VisitRRIR(this, kArm64##Type##ReplaceLane, node); \
- }
-SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#define SIMD_VISIT_REPLACE_LANE(Type, T, LaneSize) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kArm64##T##ReplaceLane | LaneSizeField::encode(LaneSize), \
+ node); \
+ }
+SIMD_VISIT_REPLACE_LANE(F64x2, F, 64)
+SIMD_VISIT_REPLACE_LANE(F32x4, F, 32)
+SIMD_VISIT_REPLACE_LANE(I64x2, I, 64)
+SIMD_VISIT_REPLACE_LANE(I32x4, I, 32)
+SIMD_VISIT_REPLACE_LANE(I16x8, I, 16)
+SIMD_VISIT_REPLACE_LANE(I8x16, I, 8)
#undef SIMD_VISIT_REPLACE_LANE
-#undef SIMD_TYPE_LIST
#define SIMD_VISIT_UNOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -3642,6 +3674,22 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST
+#define SIMD_VISIT_BINOP_LANE_SIZE(Name, instruction, LaneSize) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction | LaneSizeField::encode(LaneSize), node); \
+ }
+SIMD_BINOP_LANE_SIZE_LIST(SIMD_VISIT_BINOP_LANE_SIZE)
+#undef SIMD_VISIT_BINOP_LANE_SIZE
+#undef SIMD_BINOP_LANE_SIZE_LIST
+
+#define SIMD_VISIT_UNOP_LANE_SIZE(Name, instruction, LaneSize) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction | LaneSizeField::encode(LaneSize), node); \
+ }
+SIMD_UNOP_LANE_SIZE_LIST(SIMD_VISIT_UNOP_LANE_SIZE)
+#undef SIMD_VISIT_UNOP_LANE_SIZE
+#undef SIMD_UNOP_LANE_SIZE_LIST
+
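// Illustrative expansion, not part of the patch: for the entry
// V(F64x2Abs, kArm64FAbs, 64) in SIMD_UNOP_LANE_SIZE_LIST, the
// SIMD_VISIT_UNOP_LANE_SIZE macro above generates
//
//   void InstructionSelector::VisitF64x2Abs(Node* node) {
//     VisitRR(this, kArm64FAbs | LaneSizeField::encode(64), node);
//   }
//
// so a single kArm64FAbs opcode now serves both F64x2Abs and F32x4Abs; the
// code generator reads the element width back out of LaneSizeField instead of
// needing a separate kArm64F64x2Abs / kArm64F32x4Abs opcode per shape.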
using ShuffleMatcher =
ValueMatcher<S128ImmediateParameter, IrOpcode::kI8x16Shuffle>;
using BinopWithShuffleMatcher = BinopMatcher<ShuffleMatcher, ShuffleMatcher>;
@@ -3702,22 +3750,22 @@ MulWithDupResult TryMatchMulWithDup(Node* node) {
void InstructionSelector::VisitF32x4Mul(Node* node) {
if (MulWithDupResult result = TryMatchMulWithDup<4>(node)) {
Arm64OperandGenerator g(this);
- Emit(kArm64F32x4MulElement, g.DefineAsRegister(node),
- g.UseRegister(result.input), g.UseRegister(result.dup_node),
- g.UseImmediate(result.index));
+ Emit(kArm64FMulElement | LaneSizeField::encode(32),
+ g.DefineAsRegister(node), g.UseRegister(result.input),
+ g.UseRegister(result.dup_node), g.UseImmediate(result.index));
} else {
- return VisitRRR(this, kArm64F32x4Mul, node);
+ return VisitRRR(this, kArm64FMul | LaneSizeField::encode(32), node);
}
}
void InstructionSelector::VisitF64x2Mul(Node* node) {
if (MulWithDupResult result = TryMatchMulWithDup<2>(node)) {
Arm64OperandGenerator g(this);
- Emit(kArm64F64x2MulElement, g.DefineAsRegister(node),
- g.UseRegister(result.input), g.UseRegister(result.dup_node),
- g.UseImmediate(result.index));
+ Emit(kArm64FMulElement | LaneSizeField::encode(64),
+ g.DefineAsRegister(node), g.UseRegister(result.input),
+ g.UseRegister(result.dup_node), g.UseImmediate(result.index));
} else {
- return VisitRRR(this, kArm64F64x2Mul, node);
+ return VisitRRR(this, kArm64FMul | LaneSizeField::encode(64), node);
}
}
@@ -3729,84 +3777,178 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
arraysize(temps), temps);
}
-#define VISIT_SIMD_ADD(Type, PairwiseType, LaneSize) \
- void InstructionSelector::Visit##Type##Add(Node* node) { \
- Arm64OperandGenerator g(this); \
- Node* left = node->InputAt(0); \
- Node* right = node->InputAt(1); \
- /* Select Mla(z, x, y) for Add(Mul(x, y), z). */ \
- if (left->opcode() == IrOpcode::k##Type##Mul && CanCover(node, left)) { \
- Emit(kArm64##Type##Mla, g.DefineSameAsFirst(node), g.UseRegister(right), \
- g.UseRegister(left->InputAt(0)), g.UseRegister(left->InputAt(1))); \
- return; \
- } \
- /* Select Mla(z, x, y) for Add(z, Mul(x, y)). */ \
- if (right->opcode() == IrOpcode::k##Type##Mul && CanCover(node, right)) { \
- Emit(kArm64##Type##Mla, g.DefineSameAsFirst(node), g.UseRegister(left), \
- g.UseRegister(right->InputAt(0)), \
- g.UseRegister(right->InputAt(1))); \
- return; \
- } \
- /* Select Sadalp(x, y) for Add(x, ExtAddPairwiseS(y)). */ \
- if (right->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##S && \
- CanCover(node, right)) { \
- Emit(kArm64Sadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(left), \
- g.UseRegister(right->InputAt(0))); \
- return; \
- } \
- /* Select Sadalp(y, x) for Add(ExtAddPairwiseS(x), y). */ \
- if (left->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##S && \
- CanCover(node, left)) { \
- Emit(kArm64Sadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(right), \
- g.UseRegister(left->InputAt(0))); \
- return; \
- } \
- /* Select Uadalp(x, y) for Add(x, ExtAddPairwiseU(y)). */ \
- if (right->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##U && \
- CanCover(node, right)) { \
- Emit(kArm64Uadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(left), \
- g.UseRegister(right->InputAt(0))); \
- return; \
- } \
- /* Select Uadalp(y, x) for Add(ExtAddPairwiseU(x), y). */ \
- if (left->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##U && \
- CanCover(node, left)) { \
- Emit(kArm64Uadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(right), \
- g.UseRegister(left->InputAt(0))); \
- return; \
- } \
- VisitRRR(this, kArm64##Type##Add, node); \
+namespace {
+
+// Used for pattern matching SIMD Add operations where one of the inputs
+// matches |opcode|; ensures that the matched input is on the LHS (input 0).
+struct SimdAddOpMatcher : public NodeMatcher {
+ explicit SimdAddOpMatcher(Node* node, IrOpcode::Value opcode)
+ : NodeMatcher(node),
+ opcode_(opcode),
+ left_(InputAt(0)),
+ right_(InputAt(1)) {
+ DCHECK(HasProperty(Operator::kCommutative));
+ PutOpOnLeft();
+ }
+
+ bool Matches() { return left_->opcode() == opcode_; }
+ Node* left() const { return left_; }
+ Node* right() const { return right_; }
+
+ private:
+ void PutOpOnLeft() {
+ if (right_->opcode() == opcode_) {
+ std::swap(left_, right_);
+ node()->ReplaceInput(0, left_);
+ node()->ReplaceInput(1, right_);
+ }
+ }
+ IrOpcode::Value opcode_;
+ Node* left_;
+ Node* right_;
+};
+
+bool ShraHelper(InstructionSelector* selector, Node* node, int lane_size,
+ InstructionCode shra_code, InstructionCode add_code,
+ IrOpcode::Value shift_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, shift_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+ if (!g.IsIntegerConstant(m.left()->InputAt(1))) return false;
+
+ // If shifting by zero, just do the addition
+ if (g.GetIntegerConstantValue(m.left()->InputAt(1)) % lane_size == 0) {
+ selector->Emit(add_code, g.DefineAsRegister(node),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseRegister(m.right()));
+ } else {
+ selector->Emit(shra_code | LaneSizeField::encode(lane_size),
+ g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseImmediate(m.left()->InputAt(1)));
+ }
+ return true;
+}
+
+bool AdalpHelper(InstructionSelector* selector, Node* node, int lane_size,
+ InstructionCode adalp_code, IrOpcode::Value ext_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, ext_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+ selector->Emit(adalp_code | LaneSizeField::encode(lane_size),
+ g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)));
+ return true;
+}
+
+bool MlaHelper(InstructionSelector* selector, Node* node,
+ InstructionCode mla_code, IrOpcode::Value mul_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, mul_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+ selector->Emit(mla_code, g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseRegister(m.left()->InputAt(1)));
+ return true;
+}
+
+bool SmlalHelper(InstructionSelector* selector, Node* node, int lane_size,
+ InstructionCode smlal_code, IrOpcode::Value ext_mul_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, ext_mul_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+
+ selector->Emit(smlal_code | LaneSizeField::encode(lane_size),
+ g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseRegister(m.left()->InputAt(1)));
+ return true;
+}
+
+} // namespace
+
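// Illustrative example, not part of the patch: SimdAddOpMatcher canonicalizes
// the commutative Add so the fusable operand ends up at input 0, letting each
// helper check a single shape. For instance, for
//
//   I64x2Add(x, I64x2ShrS(y, 3))
//
// the matcher swaps the inputs, ShraHelper covers the shift node, and a single
// shift-right-and-accumulate is emitted, roughly
//
//   ssra v_x.2d, v_y.2d, #3    // output constrained to the register holding x
//
// instead of a separate vector shift followed by an add. The Mla, S/Uadalp and
// S/Umlal(2) helpers below follow the same pattern for their fusions.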
+void InstructionSelector::VisitI64x2Add(Node* node) {
+ if (!ShraHelper(this, node, 64, kArm64Ssra,
+ kArm64IAdd | LaneSizeField::encode(64),
+ IrOpcode::kI64x2ShrS) &&
+ !ShraHelper(this, node, 64, kArm64Usra,
+ kArm64IAdd | LaneSizeField::encode(64),
+ IrOpcode::kI64x2ShrU)) {
+ VisitRRR(this, kArm64IAdd | LaneSizeField::encode(64), node);
+ }
+}
+
+void InstructionSelector::VisitI8x16Add(Node* node) {
+ if (!ShraHelper(this, node, 8, kArm64Ssra,
+ kArm64IAdd | LaneSizeField::encode(8),
+ IrOpcode::kI8x16ShrS) &&
+ !ShraHelper(this, node, 8, kArm64Usra,
+ kArm64IAdd | LaneSizeField::encode(8),
+ IrOpcode::kI8x16ShrU)) {
+ VisitRRR(this, kArm64IAdd | LaneSizeField::encode(8), node);
+ }
+}
+
+#define VISIT_SIMD_ADD(Type, PairwiseType, LaneSize) \
+ void InstructionSelector::Visit##Type##Add(Node* node) { \
+ /* Select Mla(z, x, y) for Add(x, Mul(y, z)). */ \
+ if (MlaHelper(this, node, kArm64Mla | LaneSizeField::encode(LaneSize), \
+ IrOpcode::k##Type##Mul)) { \
+ return; \
+ } \
+ /* Select S/Uadalp(x, y) for Add(x, ExtAddPairwise(y)). */ \
+ if (AdalpHelper(this, node, LaneSize, kArm64Sadalp, \
+ IrOpcode::k##Type##ExtAddPairwise##PairwiseType##S) || \
+ AdalpHelper(this, node, LaneSize, kArm64Uadalp, \
+ IrOpcode::k##Type##ExtAddPairwise##PairwiseType##U)) { \
+ return; \
+ } \
+ /* Select S/Usra(x, y) for Add(x, ShiftRight(y, imm)). */ \
+ if (ShraHelper(this, node, LaneSize, kArm64Ssra, \
+ kArm64IAdd | LaneSizeField::encode(LaneSize), \
+ IrOpcode::k##Type##ShrS) || \
+ ShraHelper(this, node, LaneSize, kArm64Usra, \
+ kArm64IAdd | LaneSizeField::encode(LaneSize), \
+ IrOpcode::k##Type##ShrU)) { \
+ return; \
+ } \
+ /* Select Smlal/Umlal(x, y, z) for Add(x, ExtMulLow(y, z)) and \
+ * Smlal2/Umlal2(x, y, z) for Add(x, ExtMulHigh(y, z)). */ \
+ if (SmlalHelper(this, node, LaneSize, kArm64Smlal, \
+ IrOpcode::k##Type##ExtMulLow##PairwiseType##S) || \
+ SmlalHelper(this, node, LaneSize, kArm64Smlal2, \
+ IrOpcode::k##Type##ExtMulHigh##PairwiseType##S) || \
+ SmlalHelper(this, node, LaneSize, kArm64Umlal, \
+ IrOpcode::k##Type##ExtMulLow##PairwiseType##U) || \
+ SmlalHelper(this, node, LaneSize, kArm64Umlal2, \
+ IrOpcode::k##Type##ExtMulHigh##PairwiseType##U)) { \
+ return; \
+ } \
+ VisitRRR(this, kArm64IAdd | LaneSizeField::encode(LaneSize), node); \
}
VISIT_SIMD_ADD(I32x4, I16x8, 32)
VISIT_SIMD_ADD(I16x8, I8x16, 16)
#undef VISIT_SIMD_ADD
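// Illustrative example, not part of the patch: with the macro above,
// I32x4Add(x, I32x4Mul(y, z)) is matched by MlaHelper and lowered to a single
// multiply-accumulate, roughly
//
//   mla v_x.4s, v_y.4s, v_z.4s    // output constrained to the register of x
//
// and only when none of the Mla, S/Uadalp, S/Usra or S/Umlal patterns apply
// does the add fall back to the plain kArm64IAdd | LaneSizeField::encode(32)
// emission.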
-#define VISIT_SIMD_SUB(Type) \
+#define VISIT_SIMD_SUB(Type, LaneSize) \
void InstructionSelector::Visit##Type##Sub(Node* node) { \
Arm64OperandGenerator g(this); \
Node* left = node->InputAt(0); \
Node* right = node->InputAt(1); \
/* Select Mls(z, x, y) for Sub(z, Mul(x, y)). */ \
if (right->opcode() == IrOpcode::k##Type##Mul && CanCover(node, right)) { \
- Emit(kArm64##Type##Mls, g.DefineSameAsFirst(node), g.UseRegister(left), \
+ Emit(kArm64Mls | LaneSizeField::encode(LaneSize), \
+ g.DefineSameAsFirst(node), g.UseRegister(left), \
g.UseRegister(right->InputAt(0)), \
g.UseRegister(right->InputAt(1))); \
return; \
} \
- VisitRRR(this, kArm64##Type##Sub, node); \
+ VisitRRR(this, kArm64ISub | LaneSizeField::encode(LaneSize), node); \
}
-VISIT_SIMD_SUB(I32x4)
-VISIT_SIMD_SUB(I16x8)
+VISIT_SIMD_SUB(I32x4, 32)
+VISIT_SIMD_SUB(I16x8, 16)
#undef VISIT_SIMD_SUB
void InstructionSelector::VisitS128Select(Node* node) {
@@ -4110,6 +4252,8 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kFloat32RoundTiesEven |
MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kWord32Popcnt |
+ MachineOperatorBuilder::kWord64Popcnt |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe |
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 9e378b8458..ad5e18d002 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -41,14 +41,16 @@ class CodeGenerator::JumpTable final : public ZoneObject {
size_t const target_count_;
};
-CodeGenerator::CodeGenerator(
- Zone* codegen_zone, Frame* frame, Linkage* linkage,
- InstructionSequence* instructions, OptimizedCompilationInfo* info,
- Isolate* isolate, base::Optional<OsrHelper> osr_helper,
- int start_source_position, JumpOptimizationInfo* jump_opt,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
- Builtin builtin, size_t max_unoptimized_frame_height,
- size_t max_pushed_argument_count, const char* debug_name)
+CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* instructions,
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ base::Optional<OsrHelper> osr_helper,
+ int start_source_position,
+ JumpOptimizationInfo* jump_opt,
+ const AssemblerOptions& options, Builtin builtin,
+ size_t max_unoptimized_frame_height,
+ size_t max_pushed_argument_count,
+ const char* debug_name)
: zone_(codegen_zone),
isolate_(isolate),
frame_access_state_(nullptr),
@@ -80,7 +82,6 @@ CodeGenerator::CodeGenerator(
codegen_zone, SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
protected_instructions_(codegen_zone),
result_(kSuccess),
- poisoning_level_(poisoning_level),
block_starts_(codegen_zone),
instr_starts_(codegen_zone),
debug_name_(debug_name) {
@@ -284,9 +285,6 @@ void CodeGenerator::AssembleCode() {
BailoutIfDeoptimized();
}
- offsets_info_.init_poison = tasm()->pc_offset();
- InitializeSpeculationPoison();
-
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
@@ -355,8 +353,6 @@ void CodeGenerator::AssembleCode() {
tasm()->bind(GetLabel(current_block_));
- TryInsertBranchPoisoning(block);
-
if (block->must_construct_frame()) {
AssembleConstructFrame();
// We need to setup the root register after we assemble the prologue, to
@@ -494,37 +490,6 @@ void CodeGenerator::AssembleCode() {
result_ = kSuccess;
}
-void CodeGenerator::TryInsertBranchPoisoning(const InstructionBlock* block) {
- // See if our predecessor was a basic block terminated by a branch_and_poison
- // instruction. If yes, then perform the masking based on the flags.
- if (block->PredecessorCount() != 1) return;
- RpoNumber pred_rpo = (block->predecessors())[0];
- const InstructionBlock* pred = instructions()->InstructionBlockAt(pred_rpo);
- if (pred->code_start() == pred->code_end()) return;
- Instruction* instr = instructions()->InstructionAt(pred->code_end() - 1);
- FlagsMode mode = FlagsModeField::decode(instr->opcode());
- switch (mode) {
- case kFlags_branch_and_poison: {
- BranchInfo branch;
- RpoNumber target = ComputeBranchInfo(&branch, instr);
- if (!target.IsValid()) {
- // Non-trivial branch, add the masking code.
- FlagsCondition condition = branch.condition;
- if (branch.false_label == GetLabel(block->rpo_number())) {
- condition = NegateFlagsCondition(condition);
- }
- AssembleBranchPoisoning(condition, instr);
- }
- break;
- }
- case kFlags_deoptimize_and_poison: {
- UNREACHABLE();
- }
- default:
- break;
- }
-}
-
void CodeGenerator::AssembleArchBinarySearchSwitchRange(
Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
std::pair<int32_t, Label*>* end) {
@@ -839,8 +804,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
switch (mode) {
- case kFlags_branch:
- case kFlags_branch_and_poison: {
+ case kFlags_branch: {
BranchInfo branch;
RpoNumber target = ComputeBranchInfo(&branch, instr);
if (target.IsValid()) {
@@ -854,8 +818,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
AssembleArchBranch(instr, &branch);
break;
}
- case kFlags_deoptimize:
- case kFlags_deoptimize_and_poison: {
+ case kFlags_deoptimize: {
// Assemble a conditional eager deoptimization after this instruction.
InstructionOperandConverter i(this, instr);
size_t frame_state_offset =
@@ -864,17 +827,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
DeoptImmedArgsCountField::decode(instr->opcode());
DeoptimizationExit* const exit = AddDeoptimizationExit(
instr, frame_state_offset, immediate_args_count);
- Label continue_label;
BranchInfo branch;
branch.condition = condition;
branch.true_label = exit->label();
- branch.false_label = &continue_label;
+ branch.false_label = exit->continue_label();
branch.fallthru = true;
AssembleArchDeoptBranch(instr, &branch);
- tasm()->bind(&continue_label);
- if (mode == kFlags_deoptimize_and_poison) {
- AssembleBranchPoisoning(NegateFlagsCondition(branch.condition), instr);
- }
tasm()->bind(exit->continue_label());
break;
}
@@ -890,21 +848,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
case kFlags_trap: {
#if V8_ENABLE_WEBASSEMBLY
AssembleArchTrap(instr, condition);
+ break;
#else
UNREACHABLE();
#endif // V8_ENABLE_WEBASSEMBLY
- break;
}
case kFlags_none: {
break;
}
}
- // TODO(jarin) We should thread the flag through rather than set it.
- if (instr->IsCall()) {
- ResetSpeculationPoison();
- }
-
return kSuccess;
}
@@ -1087,9 +1040,9 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
if (needs_frame_state) {
MarkLazyDeoptSite();
- // If the frame state is present, it starts at argument 2 - after
- // the code address and the poison-alias index.
- size_t frame_state_offset = 2;
+ // If the frame state is present, it starts at argument 1 - after
+ // the code address.
+ size_t frame_state_offset = 1;
FrameStateDescriptor* descriptor =
GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
int pc_offset = tasm()->pc_offset_for_safepoint();
@@ -1428,29 +1381,6 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
OutputFrameStateCombine::Ignore());
}
-void CodeGenerator::InitializeSpeculationPoison() {
- if (poisoning_level_ == PoisoningMitigationLevel::kDontPoison) return;
-
- // Initialize {kSpeculationPoisonRegister} either by comparing the expected
- // with the actual call target, or by unconditionally using {-1} initially.
- // Masking register arguments with it only makes sense in the first case.
- if (info()->called_with_code_start_register()) {
- tasm()->RecordComment("-- Prologue: generate speculation poison --");
- GenerateSpeculationPoisonFromCodeStartRegister();
- if (info()->poison_register_arguments()) {
- AssembleRegisterArgumentPoisoning();
- }
- } else {
- ResetSpeculationPoison();
- }
-}
-
-void CodeGenerator::ResetSpeculationPoison() {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- tasm()->ResetSpeculationPoisonRegister();
- }
-}
-
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
gen->ools_ = this;
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 7ccb09d5ac..18de20f92c 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -103,7 +103,6 @@ class DeoptimizationLiteral {
struct TurbolizerCodeOffsetsInfo {
int code_start_register_check = -1;
int deopt_check = -1;
- int init_poison = -1;
int blocks_start = -1;
int out_of_line_code = -1;
int deoptimization_exits = -1;
@@ -120,14 +119,16 @@ struct TurbolizerInstructionStartInfo {
// Generates native code for a sequence of instructions.
class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
public:
- explicit CodeGenerator(
- Zone* codegen_zone, Frame* frame, Linkage* linkage,
- InstructionSequence* instructions, OptimizedCompilationInfo* info,
- Isolate* isolate, base::Optional<OsrHelper> osr_helper,
- int start_source_position, JumpOptimizationInfo* jump_opt,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
- Builtin builtin, size_t max_unoptimized_frame_height,
- size_t max_pushed_argument_count, const char* debug_name = nullptr);
+ explicit CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* instructions,
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ base::Optional<OsrHelper> osr_helper,
+ int start_source_position,
+ JumpOptimizationInfo* jump_opt,
+ const AssemblerOptions& options, Builtin builtin,
+ size_t max_unoptimized_frame_height,
+ size_t max_pushed_argument_count,
+ const char* debug_name = nullptr);
// Generate native code. After calling AssembleCode, call FinalizeCode to
// produce the actual code object. If an error occurs during either phase,
@@ -216,17 +217,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// Assemble instructions for the specified block.
CodeGenResult AssembleBlock(const InstructionBlock* block);
- // Inserts mask update at the beginning of an instruction block if the
- // predecessor blocks ends with a masking branch.
- void TryInsertBranchPoisoning(const InstructionBlock* block);
-
- // Initializes the masking register in the prologue of a function.
- void InitializeSpeculationPoison();
- // Reset the masking register during execution of a function.
- void ResetSpeculationPoison();
- // Generates a mask from the pc passed in {kJavaScriptCallCodeStartRegister}.
- void GenerateSpeculationPoisonFromCodeStartRegister();
-
// Assemble code for the specified instruction.
CodeGenResult AssembleInstruction(int instruction_index,
const InstructionBlock* block);
@@ -276,18 +266,12 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// contains the expected pointer to the start of the instruction stream.
void AssembleCodeStartRegisterCheck();
- void AssembleBranchPoisoning(FlagsCondition condition, Instruction* instr);
-
// When entering a code that is marked for deoptimization, rather continuing
// with its execution, we jump to a lazy compiled code. We need to do this
// because this code has already been deoptimized and needs to be unlinked
// from the JS functions referring it.
void BailoutIfDeoptimized();
- // Generates code to poison the stack pointer and implicit register arguments
- // like the context register and the function register.
- void AssembleRegisterArgumentPoisoning();
-
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
void AssembleConstructFrame();
@@ -484,7 +468,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
SourcePositionTableBuilder source_position_table_builder_;
ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
CodeGenResult result_;
- PoisoningMitigationLevel poisoning_level_;
ZoneVector<int> block_starts_;
TurbolizerCodeOffsetsInfo offsets_info_;
ZoneVector<TurbolizerInstructionStartInfo> instr_starts_;
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 5db3f20fa4..e03f934ba5 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -5,6 +5,7 @@
#include "src/base/overflowing-math.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
+#include "src/codegen/cpu-features.h"
#include "src/codegen/ia32/assembler-ia32.h"
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/macro-assembler.h"
@@ -684,16 +685,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ bind(&skip);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -712,11 +703,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -738,19 +725,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
__ wasm_call(wasm_code, constant.rmode());
} else {
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(wasm_code, constant.rmode());
- } else {
- __ call(wasm_code, constant.rmode());
- }
+ __ call(wasm_code, constant.rmode());
}
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(i.InputRegister(0));
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -762,12 +740,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Address wasm_code = static_cast<Address>(constant.ToInt32());
__ jmp(wasm_code, constant.rmode());
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(i.InputRegister(0));
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -784,11 +757,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -800,11 +769,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
break;
@@ -993,7 +958,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
- case kArchStoreWithWriteBarrier: {
+    case kArchStoreWithWriteBarrier:  // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -1005,7 +971,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
scratch0, scratch1, mode,
DetermineStubCallMode());
- __ mov(operand, value);
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ __ mov(operand, value);
+ } else {
+ __ mov(scratch0, value);
+ __ xchg(scratch0, operand);
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -1278,9 +1249,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32Bswap:
__ bswap(i.OutputRegister());
break;
- case kArchWordPoisonOnSpeculation:
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
case kIA32MFence:
__ mfence();
break;
@@ -1290,40 +1258,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEFloat32Cmp:
__ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
- case kSSEFloat32Add:
- __ addss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Sub:
- __ subss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Mul:
- __ mulss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Div:
- __ divss(i.InputDoubleRegister(0), i.InputOperand(1));
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a (v)mulss depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
- break;
case kSSEFloat32Sqrt:
__ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat32Abs: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 33);
- __ andps(i.OutputDoubleRegister(), tmp);
- break;
- }
- case kSSEFloat32Neg: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 31);
- __ xorps(i.OutputDoubleRegister(), tmp);
- break;
- }
case kSSEFloat32Round: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
@@ -1334,21 +1271,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEFloat64Cmp:
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
- case kSSEFloat64Add:
- __ addsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Sub:
- __ subsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Mul:
- __ mulsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Div:
- __ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a (v)mulsd depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
- break;
case kSSEFloat32Max: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
@@ -1488,22 +1410,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(esp, tmp);
break;
}
- case kSSEFloat64Abs: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 1);
- __ andps(i.OutputDoubleRegister(), tmp);
- break;
- }
- case kSSEFloat64Neg: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 63);
- __ xorps(i.OutputDoubleRegister(), tmp);
- break;
- }
case kSSEFloat64Sqrt:
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
@@ -1571,94 +1477,106 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEFloat64LoadLowWord32:
__ movd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kAVXFloat32Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Add: {
+ __ Addss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat32Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vsubss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Sub: {
+ __ Subss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat32Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vmulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Mul: {
+ __ Mulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat32Div: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vdivss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Div: {
+ __ Divss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulss depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
- case kAVXFloat64Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Add: {
+ __ Addsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat64Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Sub: {
+ __ Subsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat64Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Mul: {
+ __ Mulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat64Div: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Div: {
+ __ Divsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulsd depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
- case kAVXFloat32Abs: {
+ case kFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 33);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrlq(kScratchDoubleReg, byte{33});
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ } else {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ }
break;
}
- case kAVXFloat32Neg: {
+ case kFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 31);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psllq(kScratchDoubleReg, byte{31});
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ } else {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ }
break;
}
- case kAVXFloat64Abs: {
+ case kFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 1);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrlq(kScratchDoubleReg, byte{1});
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ } else {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ }
break;
}
- case kAVXFloat64Neg: {
+ case kFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 63);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psllq(kScratchDoubleReg, byte{63});
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ } else {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ }
break;
}
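// Illustrative note, not part of the patch: the merged kFloat32/64 Abs and Neg
// cases above all follow the same dispatch shape. They build the sign-bit mask
// in kScratchDoubleReg and then choose the encoding at code-generation time:
//
//   if (CpuFeatures::IsSupported(AVX)) {
//     // three-operand AVX form; dst may differ from the input
//   } else {
//     // two-operand SSE form; dst must alias input 0 (hence the DCHECK_EQ)
//   }
//
// so a single arch opcode replaces each of the former kSSE.../kAVX... pairs,
// and the instruction selector no longer has to pick between them.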
case kSSEFloat64SilenceNaN:
@@ -2374,48 +2292,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vandnps(dst, dst, kScratchDoubleReg);
break;
}
- case kSSEF32x4Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpeqps(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32F32x4Eq: {
+ __ Cmpeqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXF32x4Eq: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpeqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32F32x4Ne: {
+ __ Cmpneqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEF32x4Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpneqps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXF32x4Ne: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpneqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEF32x4Lt: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpltps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXF32x4Lt: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpltps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEF32x4Le: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpleps(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32F32x4Lt: {
+ __ Cmpltps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXF32x4Le: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpleps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32F32x4Le: {
+ __ Cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kIA32F32x4Pmin: {
@@ -2445,20 +2339,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I32x4SConvertF32x4: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- // NAN->0
- __ Cmpeqps(kScratchDoubleReg, src, src);
- __ Pand(dst, src, kScratchDoubleReg);
- // Set top bit if >= 0 (but not -0.0!)
- __ Pxor(kScratchDoubleReg, dst);
- // Convert
- __ Cvttps2dq(dst, dst);
- // Set top bit if >=0 is now < 0
- __ Pand(kScratchDoubleReg, dst);
- __ Psrad(kScratchDoubleReg, kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF
- __ Pxor(dst, kScratchDoubleReg);
+ __ I32x4SConvertF32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ i.TempRegister(0));
break;
}
case kIA32I32x4SConvertI16x8Low: {
@@ -2490,117 +2373,63 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psrad, 5);
break;
}
- case kSSEI32x4Add: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddd(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4Add: {
+ __ Paddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I32x4Sub: {
+ __ Psubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kSSEI32x4Sub: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubd(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4Mul: {
+ __ Pmulld(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4MinS: {
+ __ Pminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI32x4Mul: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmulld(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4MaxS: {
+ __ Pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmulld(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4Eq: {
+ __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI32x4MinS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminsd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4MinS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4Ne: {
+ __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
break;
}
- case kSSEI32x4MaxS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxsd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4MaxS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4GtS: {
+ __ Pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI32x4Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpeqd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4Eq: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI32x4Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpeqd(i.OutputSimd128Register(), i.InputOperand(1));
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ xorps(i.OutputSimd128Register(), kScratchDoubleReg);
- break;
- }
- case kAVXI32x4Ne: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpxor(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchDoubleReg);
- break;
- }
- case kSSEI32x4GtS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpgtd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4GtS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI32x4GeS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ case kIA32I32x4GeS: {
XMMRegister dst = i.OutputSimd128Register();
- Operand src = i.InputOperand(1);
- __ pminsd(dst, src);
- __ pcmpeqd(dst, src);
- break;
- }
- case kAVXI32x4GeS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
- Operand src2 = i.InputOperand(1);
- __ vpminsd(kScratchDoubleReg, src1, src2);
- __ vpcmpeqd(i.OutputSimd128Register(), kScratchDoubleReg, src2);
+ XMMRegister src2 = i.InputSimd128Register(1);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpminsd(kScratchDoubleReg, src1, src2);
+ __ vpcmpeqd(dst, kScratchDoubleReg, src2);
+ } else {
+ DCHECK_EQ(dst, src1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pminsd(dst, src2);
+ __ pcmpeqd(dst, src2);
+ }
break;
}
case kSSEI32x4UConvertF32x4: {
@@ -2671,28 +2500,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psrld, 5);
break;
}
- case kSSEI32x4MinU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminud(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4MinU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminud(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI32x4MaxU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxud(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4MinU: {
+ __ Pminud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4MaxU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I32x4MaxU: {
+ __ Pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI32x4GtU: {
@@ -2748,10 +2563,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I16x8Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Movd(dst, i.InputOperand(0));
- __ Pshuflw(dst, dst, uint8_t{0x0});
- __ Pshufd(dst, dst, uint8_t{0x0});
+ if (instr->InputAt(0)->IsRegister()) {
+ __ I16x8Splat(i.OutputSimd128Register(), i.InputRegister(0));
+ } else {
+ __ I16x8Splat(i.OutputSimd128Register(), i.InputOperand(0));
+ }
break;
}
case kIA32I16x8ExtractLaneS: {
@@ -2789,105 +2605,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psraw, 4);
break;
}
- case kSSEI16x8SConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ packssdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
- break;
- }
- case kAVXI16x8SConvertI32x4: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpackssdw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8SConvertI32x4: {
+ __ Packssdw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kSSEI16x8Add: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8Add: {
+ __ Paddw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8AddSatS: {
+ __ Paddsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8AddSatS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddsw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8AddSatS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8Sub: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8Sub: {
+ __ Psubw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8SubSatS: {
+ __ Psubsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8SubSatS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubsw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8SubSatS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8Mul: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pmullw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmullw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8MinS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pminsw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8Mul: {
+ __ Pmullw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MinS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8MinS: {
+ __ Pminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kSSEI16x8MaxS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pmaxsw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8MaxS: {
+ __ Pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MaxS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8Eq: {
+ __ Pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpeqw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8Eq: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
case kSSEI16x8Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqw(i.OutputSimd128Register(), i.InputOperand(1));
@@ -2904,15 +2666,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchDoubleReg);
break;
}
- case kSSEI16x8GtS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpgtw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8GtS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8GtS: {
+ __ Pcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI16x8GeS: {
@@ -2944,63 +2700,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psrlw, 4);
break;
}
- case kSSEI16x8UConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ packusdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
- break;
- }
- case kAVXI16x8UConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- __ vpackusdw(dst, dst, i.InputSimd128Register(1));
- break;
- }
- case kSSEI16x8AddSatU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddusw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8AddSatU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8UConvertI32x4: {
+ __ Packusdw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kSSEI16x8SubSatU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubusw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8SubSatU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8MinU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminuw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8AddSatU: {
+ __ Paddusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MinU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8SubSatU: {
+ __ Psubusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8MaxU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxuw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8MinU: {
+ __ Pminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MaxU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8MaxU: {
+ __ Pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI16x8GtU: {
@@ -3060,10 +2782,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I8x16Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Movd(dst, i.InputOperand(0));
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(dst, kScratchDoubleReg);
+ if (instr->InputAt(0)->IsRegister()) {
+ __ I8x16Splat(i.OutputSimd128Register(), i.InputRegister(0),
+ kScratchDoubleReg);
+ } else {
+ __ I8x16Splat(i.OutputSimd128Register(), i.InputOperand(0),
+ kScratchDoubleReg);
+ }
break;
}
case kIA32I8x16ExtractLaneS: {
@@ -3137,15 +2862,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ S128Store32Lane(operand, i.InputSimd128Register(index), laneidx);
break;
}
- case kSSEI8x16SConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ packsswb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16SConvertI16x8: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpacksswb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I8x16SConvertI16x8: {
+ __ Packsswb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kIA32I8x16Neg: {
@@ -3162,64 +2881,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I8x16Shl: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+ Register tmp = i.TempRegister(0);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away low bits.
- uint8_t shift = i.InputInt3(1);
- __ Psllw(dst, dst, byte{shift});
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ mov(tmp, mask);
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16Shl(dst, src, i.InputInt3(1), tmp, kScratchDoubleReg);
} else {
- // Take shift value modulo 8.
- __ mov(tmp, i.InputRegister(1));
- __ and_(tmp, 7);
- // Mask off the unwanted bits before word-shifting.
- __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ add(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
- __ Packuswb(kScratchDoubleReg, kScratchDoubleReg);
- __ Pand(dst, kScratchDoubleReg);
- // TODO(zhin): sub here to avoid asking for another temporary register,
- // examine codegen for other i8x16 shifts, they use less instructions.
- __ sub(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psllw(dst, dst, tmp_simd);
+ XMMRegister tmp_simd = i.TempSimd128Register(1);
+ __ I8x16Shl(dst, src, i.InputRegister(1), tmp, kScratchDoubleReg,
+ tmp_simd);
}
break;
}
case kIA32I8x16ShrS: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+
if (HasImmediateInput(instr, 1)) {
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- uint8_t shift = i.InputInt3(1) + 8;
- __ Psraw(kScratchDoubleReg, shift);
- __ Psraw(dst, shift);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputInt3(1), kScratchDoubleReg);
} else {
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
- // Unpack the bytes into words, do arithmetic shifts, and repack.
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- __ mov(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ and_(tmp, 7);
- __ add(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psraw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
- __ Psraw(dst, dst, tmp_simd);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputRegister(1), i.TempRegister(0),
+ kScratchDoubleReg, i.TempSimd128Register(1));
}
break;
}
@@ -3296,18 +2980,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpcmpeqb(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
- case kSSEI8x16UConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- XMMRegister dst = i.OutputSimd128Register();
- __ packuswb(dst, i.InputOperand(1));
- break;
- }
- case kAVXI8x16UConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- __ vpackuswb(dst, dst, i.InputOperand(1));
+ case kIA32I8x16UConvertI16x8: {
+ __ Packuswb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kIA32I8x16AddSatU: {
@@ -3322,34 +2997,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I8x16ShrU: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+ Register tmp = i.TempRegister(0);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = i.InputInt3(1);
- __ Psrlw(dst, dst, byte{shift});
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ mov(tmp, mask);
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16ShrU(dst, src, i.InputInt3(1), tmp, kScratchDoubleReg);
} else {
- // Unpack the bytes into words, do logical shifts, and repack.
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- __ mov(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ and_(tmp, 7);
- __ add(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
- __ Psrlw(dst, dst, tmp_simd);
- __ Packuswb(dst, kScratchDoubleReg);
+ __ I8x16ShrU(dst, src, i.InputRegister(1), tmp, kScratchDoubleReg,
+ i.TempSimd128Register(1));
}
+
break;
}
case kIA32I8x16MinU: {
@@ -3444,37 +3102,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchDoubleReg);
break;
}
- case kSSES128And: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ andps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXS128And: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpand(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSES128Or: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ orps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXS128Or: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32S128And: {
+ __ Pand(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSES128Xor: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ xorps(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32S128Or: {
+ __ Por(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXS128Xor: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpxor(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32S128Xor: {
+ __ Pxor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kIA32S128Select: {
@@ -3541,20 +3181,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32S128Load8Splat: {
- __ Pinsrb(i.OutputSimd128Register(), i.MemoryOperand(), 0);
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ S128Load8Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kIA32S128Load16Splat: {
- __ Pinsrw(i.OutputSimd128Register(), i.MemoryOperand(), 0);
- __ Pshuflw(i.OutputSimd128Register(), i.OutputSimd128Register(),
- uint8_t{0});
- __ Punpcklqdq(i.OutputSimd128Register(), i.OutputSimd128Register());
+ __ S128Load16Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kIA32S128Load32Splat: {
- __ Vbroadcastss(i.OutputSimd128Register(), i.MemoryOperand());
+ __ S128Load32Splat(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kIA32S128Load64Splat: {
@@ -3640,10 +3277,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, src, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
__ Pshufhw(dst, src, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -3671,10 +3308,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
__ Pshufhw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -3937,17 +3574,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32Word32AtomicPairLoad: {
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ movq(tmp, i.MemoryOperand());
- __ Pextrd(i.OutputRegister(0), tmp, 0);
- __ Pextrd(i.OutputRegister(1), tmp, 1);
+ __ movq(kScratchDoubleReg, i.MemoryOperand());
+ __ Pextrd(i.OutputRegister(0), kScratchDoubleReg, 0);
+ __ Pextrd(i.OutputRegister(1), kScratchDoubleReg, 1);
break;
}
- case kIA32Word32AtomicPairStore: {
+ case kIA32Word32ReleasePairStore: {
+ __ push(ebx);
+ i.MoveInstructionOperandToRegister(ebx, instr->InputAt(1));
+ __ push(ebx);
+ i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
+ __ push(ebx);
+ frame_access_state()->IncreaseSPDelta(3);
+ __ movq(kScratchDoubleReg, MemOperand(esp, 0));
+ __ pop(ebx);
+ __ pop(ebx);
+ __ pop(ebx);
+ frame_access_state()->IncreaseSPDelta(-3);
+ __ movq(i.MemoryOperand(2), kScratchDoubleReg);
+ break;
+ }
+ case kIA32Word32SeqCstPairStore: {
Label store;
__ bind(&store);
- __ mov(i.TempRegister(0), i.MemoryOperand(2));
- __ mov(i.TempRegister(1), i.NextMemoryOperand(2));
+ __ mov(eax, i.MemoryOperand(2));
+ __ mov(edx, i.NextMemoryOperand(2));
__ push(ebx);
frame_access_state()->IncreaseSPDelta(1);
i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
@@ -3958,27 +3609,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(not_equal, &store);
break;
}
- case kWord32AtomicExchangeInt8: {
+ case kAtomicExchangeInt8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_b(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint8: {
+ case kAtomicExchangeUint8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movzx_b(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeInt16: {
+ case kAtomicExchangeInt16: {
__ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_w(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint16: {
+ case kAtomicExchangeUint16: {
__ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
__ movzx_w(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
__ xchg(i.InputRegister(0), i.MemoryOperand(1));
break;
}
@@ -3998,31 +3649,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(not_equal, &exchange);
break;
}
- case kWord32AtomicCompareExchangeInt8: {
+ case kAtomicCompareExchangeInt8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_b(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeUint8: {
+ case kAtomicCompareExchangeUint8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_b(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeInt16: {
+ case kAtomicCompareExchangeInt16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_w(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeUint16: {
+ case kAtomicCompareExchangeUint16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_w(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeWord32: {
+ case kAtomicCompareExchangeWord32: {
__ lock();
__ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
break;
@@ -4038,27 +3689,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: { \
+ case kAtomic##op##Int8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movsx_b(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Uint8: { \
+ case kAtomic##op##Uint8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movzx_b(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Int16: { \
+ case kAtomic##op##Int16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movsx_w(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Uint16: { \
+ case kAtomic##op##Uint16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movzx_w(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Word32: { \
+ case kAtomic##op##Word32: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
break; \
}
@@ -4107,16 +3758,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(not_equal, &binop);
break;
}
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
- break;
}
return kSuccess;
}
@@ -4126,41 +3776,29 @@ static Condition FlagsConditionToCondition(FlagsCondition condition) {
case kUnorderedEqual:
case kEqual:
return equal;
- break;
case kUnorderedNotEqual:
case kNotEqual:
return not_equal;
- break;
case kSignedLessThan:
return less;
- break;
case kSignedGreaterThanOrEqual:
return greater_equal;
- break;
case kSignedLessThanOrEqual:
return less_equal;
- break;
case kSignedGreaterThan:
return greater;
- break;
case kUnsignedLessThan:
return below;
- break;
case kUnsignedGreaterThanOrEqual:
return above_equal;
- break;
case kUnsignedLessThanOrEqual:
return below_equal;
- break;
case kUnsignedGreaterThan:
return above;
- break;
case kOverflow:
return overflow;
- break;
case kNotOverflow:
return no_overflow;
- break;
default:
UNREACHABLE();
}
@@ -4183,12 +3821,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -4648,18 +4280,24 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// The number of arguments without the receiver is
// max(argc_reg, parameter_slots-1), and the receiver is added in
// DropArguments().
- int parameter_slots_without_receiver = parameter_slots - 1;
Label mismatch_return;
Register scratch_reg = edx;
DCHECK_NE(argc_reg, scratch_reg);
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
- __ cmp(argc_reg, Immediate(parameter_slots_without_receiver));
+ if (kJSArgcIncludesReceiver) {
+ __ cmp(argc_reg, Immediate(parameter_slots));
+ } else {
+ int parameter_slots_without_receiver = parameter_slots - 1;
+ __ cmp(argc_reg, Immediate(parameter_slots_without_receiver));
+ }
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
__ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// We use a return instead of a jump for better return address prediction.
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
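The hunks above fold the old paired kSSE*/kAVX* cases into single kIA32* cases whose macro-assembler wrappers pick the encoding at runtime. Below is a minimal, self-contained C++ sketch of that dispatch pattern; HasAVX(), EmitText() and the register/operand structs are hypothetical stand-ins, not V8's TurboAssembler API. It also shows why the non-AVX path keeps the dst == src constraint that the DCHECK_IMPLIES checks above assert: the two-operand SSE form is destructive, while the three-operand AVX form is not.

// Illustrative sketch only: a unified "Paddw" wrapper that serves both the
// SSE and AVX code paths, so the code generator needs just one opcode case.
#include <cassert>
#include <cstdio>
#include <string>

struct XMMRegister { int code; };
struct Operand { std::string text; };

// Stand-in for a runtime CPU-feature probe; hard-coded for this demo.
static bool HasAVX() { return true; }

static void EmitText(const std::string& s) { std::puts(s.c_str()); }

// With AVX, emit the non-destructive three-operand vpaddw. Without AVX, fall
// back to the destructive two-operand paddw, which requires dst == src1.
void Paddw(XMMRegister dst, XMMRegister src1, const Operand& src2) {
  if (HasAVX()) {
    EmitText("vpaddw xmm" + std::to_string(dst.code) + ", xmm" +
             std::to_string(src1.code) + ", " + src2.text);
  } else {
    assert(dst.code == src1.code && "SSE form is destructive: dst must equal src1");
    EmitText("paddw xmm" + std::to_string(dst.code) + ", " + src2.text);
  }
}

int main() {
  XMMRegister xmm0{0}, xmm1{1};
  Paddw(xmm0, xmm0, Operand{"xmm1"});       // legal on both SSE and AVX paths
  Paddw(xmm0, xmm1, Operand{"[esp+16]"});   // only legal because HasAVX() is true here
  return 0;
}

This is also why the instruction selector pairs DefineAsRegister with AVX and DefineSameAsFirst without it: the wrapper hides the encoding difference, and the register constraint is the only part the selector still has to care about.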
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 42af3326f3..bb54c726aa 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -48,26 +48,14 @@ namespace compiler {
V(IA32MFence) \
V(IA32LFence) \
V(SSEFloat32Cmp) \
- V(SSEFloat32Add) \
- V(SSEFloat32Sub) \
- V(SSEFloat32Mul) \
- V(SSEFloat32Div) \
- V(SSEFloat32Abs) \
- V(SSEFloat32Neg) \
V(SSEFloat32Sqrt) \
V(SSEFloat32Round) \
V(SSEFloat64Cmp) \
- V(SSEFloat64Add) \
- V(SSEFloat64Sub) \
- V(SSEFloat64Mul) \
- V(SSEFloat64Div) \
V(SSEFloat64Mod) \
V(SSEFloat32Max) \
V(SSEFloat64Max) \
V(SSEFloat32Min) \
V(SSEFloat64Min) \
- V(SSEFloat64Abs) \
- V(SSEFloat64Neg) \
V(SSEFloat64Sqrt) \
V(SSEFloat64Round) \
V(SSEFloat32ToFloat64) \
@@ -86,18 +74,18 @@ namespace compiler {
V(SSEFloat64InsertHighWord32) \
V(SSEFloat64LoadLowWord32) \
V(SSEFloat64SilenceNaN) \
- V(AVXFloat32Add) \
- V(AVXFloat32Sub) \
- V(AVXFloat32Mul) \
- V(AVXFloat32Div) \
- V(AVXFloat64Add) \
- V(AVXFloat64Sub) \
- V(AVXFloat64Mul) \
- V(AVXFloat64Div) \
- V(AVXFloat64Abs) \
- V(AVXFloat64Neg) \
- V(AVXFloat32Abs) \
- V(AVXFloat32Neg) \
+ V(Float32Add) \
+ V(Float32Sub) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float32Mul) \
+ V(Float32Div) \
+ V(Float64Mul) \
+ V(Float64Div) \
+ V(Float64Abs) \
+ V(Float64Neg) \
+ V(Float32Abs) \
+ V(Float32Neg) \
V(IA32Movsxbl) \
V(IA32Movzxbl) \
V(IA32Movb) \
@@ -177,14 +165,10 @@ namespace compiler {
V(AVXF32x4Min) \
V(SSEF32x4Max) \
V(AVXF32x4Max) \
- V(SSEF32x4Eq) \
- V(AVXF32x4Eq) \
- V(SSEF32x4Ne) \
- V(AVXF32x4Ne) \
- V(SSEF32x4Lt) \
- V(AVXF32x4Lt) \
- V(SSEF32x4Le) \
- V(AVXF32x4Le) \
+ V(IA32F32x4Eq) \
+ V(IA32F32x4Ne) \
+ V(IA32F32x4Lt) \
+ V(IA32F32x4Le) \
V(IA32F32x4Pmin) \
V(IA32F32x4Pmax) \
V(IA32F32x4Round) \
@@ -197,33 +181,22 @@ namespace compiler {
V(IA32I32x4Neg) \
V(IA32I32x4Shl) \
V(IA32I32x4ShrS) \
- V(SSEI32x4Add) \
- V(AVXI32x4Add) \
- V(SSEI32x4Sub) \
- V(AVXI32x4Sub) \
- V(SSEI32x4Mul) \
- V(AVXI32x4Mul) \
- V(SSEI32x4MinS) \
- V(AVXI32x4MinS) \
- V(SSEI32x4MaxS) \
- V(AVXI32x4MaxS) \
- V(SSEI32x4Eq) \
- V(AVXI32x4Eq) \
- V(SSEI32x4Ne) \
- V(AVXI32x4Ne) \
- V(SSEI32x4GtS) \
- V(AVXI32x4GtS) \
- V(SSEI32x4GeS) \
- V(AVXI32x4GeS) \
+ V(IA32I32x4Add) \
+ V(IA32I32x4Sub) \
+ V(IA32I32x4Mul) \
+ V(IA32I32x4MinS) \
+ V(IA32I32x4MaxS) \
+ V(IA32I32x4Eq) \
+ V(IA32I32x4Ne) \
+ V(IA32I32x4GtS) \
+ V(IA32I32x4GeS) \
V(SSEI32x4UConvertF32x4) \
V(AVXI32x4UConvertF32x4) \
V(IA32I32x4UConvertI16x8Low) \
V(IA32I32x4UConvertI16x8High) \
V(IA32I32x4ShrU) \
- V(SSEI32x4MinU) \
- V(AVXI32x4MinU) \
- V(SSEI32x4MaxU) \
- V(AVXI32x4MaxU) \
+ V(IA32I32x4MinU) \
+ V(IA32I32x4MaxU) \
V(SSEI32x4GtU) \
V(AVXI32x4GtU) \
V(SSEI32x4GeU) \
@@ -246,43 +219,28 @@ namespace compiler {
V(IA32I16x8Neg) \
V(IA32I16x8Shl) \
V(IA32I16x8ShrS) \
- V(SSEI16x8SConvertI32x4) \
- V(AVXI16x8SConvertI32x4) \
- V(SSEI16x8Add) \
- V(AVXI16x8Add) \
- V(SSEI16x8AddSatS) \
- V(AVXI16x8AddSatS) \
- V(SSEI16x8Sub) \
- V(AVXI16x8Sub) \
- V(SSEI16x8SubSatS) \
- V(AVXI16x8SubSatS) \
- V(SSEI16x8Mul) \
- V(AVXI16x8Mul) \
- V(SSEI16x8MinS) \
- V(AVXI16x8MinS) \
- V(SSEI16x8MaxS) \
- V(AVXI16x8MaxS) \
- V(SSEI16x8Eq) \
- V(AVXI16x8Eq) \
+ V(IA32I16x8SConvertI32x4) \
+ V(IA32I16x8Add) \
+ V(IA32I16x8AddSatS) \
+ V(IA32I16x8Sub) \
+ V(IA32I16x8SubSatS) \
+ V(IA32I16x8Mul) \
+ V(IA32I16x8MinS) \
+ V(IA32I16x8MaxS) \
+ V(IA32I16x8Eq) \
V(SSEI16x8Ne) \
V(AVXI16x8Ne) \
- V(SSEI16x8GtS) \
- V(AVXI16x8GtS) \
+ V(IA32I16x8GtS) \
V(SSEI16x8GeS) \
V(AVXI16x8GeS) \
V(IA32I16x8UConvertI8x16Low) \
V(IA32I16x8UConvertI8x16High) \
V(IA32I16x8ShrU) \
- V(SSEI16x8UConvertI32x4) \
- V(AVXI16x8UConvertI32x4) \
- V(SSEI16x8AddSatU) \
- V(AVXI16x8AddSatU) \
- V(SSEI16x8SubSatU) \
- V(AVXI16x8SubSatU) \
- V(SSEI16x8MinU) \
- V(AVXI16x8MinU) \
- V(SSEI16x8MaxU) \
- V(AVXI16x8MaxU) \
+ V(IA32I16x8UConvertI32x4) \
+ V(IA32I16x8AddSatU) \
+ V(IA32I16x8SubSatU) \
+ V(IA32I16x8MinU) \
+ V(IA32I16x8MaxU) \
V(SSEI16x8GtU) \
V(AVXI16x8GtU) \
V(SSEI16x8GeU) \
@@ -305,8 +263,7 @@ namespace compiler {
V(IA32Pextrb) \
V(IA32Pextrw) \
V(IA32S128Store32Lane) \
- V(SSEI8x16SConvertI16x8) \
- V(AVXI8x16SConvertI16x8) \
+ V(IA32I8x16SConvertI16x8) \
V(IA32I8x16Neg) \
V(IA32I8x16Shl) \
V(IA32I8x16ShrS) \
@@ -322,8 +279,7 @@ namespace compiler {
V(IA32I8x16GtS) \
V(SSEI8x16GeS) \
V(AVXI8x16GeS) \
- V(SSEI8x16UConvertI16x8) \
- V(AVXI8x16UConvertI16x8) \
+ V(IA32I8x16UConvertI16x8) \
V(IA32I8x16AddSatU) \
V(IA32I8x16SubSatU) \
V(IA32I8x16ShrU) \
@@ -341,12 +297,9 @@ namespace compiler {
V(IA32S128Zero) \
V(IA32S128AllOnes) \
V(IA32S128Not) \
- V(SSES128And) \
- V(AVXS128And) \
- V(SSES128Or) \
- V(AVXS128Or) \
- V(SSES128Xor) \
- V(AVXS128Xor) \
+ V(IA32S128And) \
+ V(IA32S128Or) \
+ V(IA32S128Xor) \
V(IA32S128Select) \
V(IA32S128AndNot) \
V(IA32I8x16Swizzle) \
@@ -402,7 +355,8 @@ namespace compiler {
V(IA32I16x8AllTrue) \
V(IA32I8x16AllTrue) \
V(IA32Word32AtomicPairLoad) \
- V(IA32Word32AtomicPairStore) \
+ V(IA32Word32ReleasePairStore) \
+ V(IA32Word32SeqCstPairStore) \
V(IA32Word32AtomicPairAdd) \
V(IA32Word32AtomicPairSub) \
V(IA32Word32AtomicPairAnd) \
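The V(...) entries edited above feed an X-macro list: each entry expands into one ArchOpcode enumerator (and typically a matching name string), which is why dropping a kSSE*/kAVX* pair and adding a single kIA32* name is a pure list edit. The following is a generic sketch of that pattern with an abbreviated list and simplified macro plumbing; the real V8 macro and enum layout may differ in detail.

// Generic X-macro sketch: one list, two expansions (enum and name table).
#include <cstdio>

#define TARGET_ARCH_OPCODE_LIST(V) \
  V(IA32I16x8Add)                  \
  V(IA32I16x8Sub)                  \
  V(IA32S128And)

enum ArchOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  TARGET_ARCH_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

// The same list also generates a name table, handy for tracing/disassembly.
const char* ArchOpcodeName(ArchOpcode opcode) {
  switch (opcode) {
#define OPCODE_NAME_CASE(Name) \
  case k##Name:                \
    return #Name;
    TARGET_ARCH_OPCODE_LIST(OPCODE_NAME_CASE)
#undef OPCODE_NAME_CASE
  }
  return "unknown";
}

int main() {
  std::printf("%s\n", ArchOpcodeName(kIA32S128And));
  return 0;
}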
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 278e7ea99b..3910d45195 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -49,26 +49,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Bswap:
case kIA32Lea:
case kSSEFloat32Cmp:
- case kSSEFloat32Add:
- case kSSEFloat32Sub:
- case kSSEFloat32Mul:
- case kSSEFloat32Div:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
case kSSEFloat32Sqrt:
case kSSEFloat32Round:
case kSSEFloat64Cmp:
- case kSSEFloat64Add:
- case kSSEFloat64Sub:
- case kSSEFloat64Mul:
- case kSSEFloat64Div:
case kSSEFloat64Mod:
case kSSEFloat32Max:
case kSSEFloat64Max:
case kSSEFloat32Min:
case kSSEFloat64Min:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
case kSSEFloat64Sqrt:
case kSSEFloat64Round:
case kSSEFloat32ToFloat64:
@@ -87,18 +75,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat64InsertHighWord32:
case kSSEFloat64LoadLowWord32:
case kSSEFloat64SilenceNaN:
- case kAVXFloat32Add:
- case kAVXFloat32Sub:
- case kAVXFloat32Mul:
- case kAVXFloat32Div:
- case kAVXFloat64Add:
- case kAVXFloat64Sub:
- case kAVXFloat64Mul:
- case kAVXFloat64Div:
- case kAVXFloat64Abs:
- case kAVXFloat64Neg:
- case kAVXFloat32Abs:
- case kAVXFloat32Neg:
+ case kFloat32Add:
+ case kFloat32Sub:
+ case kFloat64Add:
+ case kFloat64Sub:
+ case kFloat32Mul:
+ case kFloat32Div:
+ case kFloat64Mul:
+ case kFloat64Div:
+ case kFloat64Abs:
+ case kFloat64Neg:
+ case kFloat32Abs:
+ case kFloat32Neg:
case kIA32BitcastFI:
case kIA32BitcastIF:
case kIA32F64x2Splat:
@@ -162,14 +150,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXF32x4Min:
case kSSEF32x4Max:
case kAVXF32x4Max:
- case kSSEF32x4Eq:
- case kAVXF32x4Eq:
- case kSSEF32x4Ne:
- case kAVXF32x4Ne:
- case kSSEF32x4Lt:
- case kAVXF32x4Lt:
- case kSSEF32x4Le:
- case kAVXF32x4Le:
+ case kIA32F32x4Eq:
+ case kIA32F32x4Ne:
+ case kIA32F32x4Lt:
+ case kIA32F32x4Le:
case kIA32F32x4Pmin:
case kIA32F32x4Pmax:
case kIA32F32x4Round:
@@ -182,33 +166,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I32x4Neg:
case kIA32I32x4Shl:
case kIA32I32x4ShrS:
- case kSSEI32x4Add:
- case kAVXI32x4Add:
- case kSSEI32x4Sub:
- case kAVXI32x4Sub:
- case kSSEI32x4Mul:
- case kAVXI32x4Mul:
- case kSSEI32x4MinS:
- case kAVXI32x4MinS:
- case kSSEI32x4MaxS:
- case kAVXI32x4MaxS:
- case kSSEI32x4Eq:
- case kAVXI32x4Eq:
- case kSSEI32x4Ne:
- case kAVXI32x4Ne:
- case kSSEI32x4GtS:
- case kAVXI32x4GtS:
- case kSSEI32x4GeS:
- case kAVXI32x4GeS:
+ case kIA32I32x4Add:
+ case kIA32I32x4Sub:
+ case kIA32I32x4Mul:
+ case kIA32I32x4MinS:
+ case kIA32I32x4MaxS:
+ case kIA32I32x4Eq:
+ case kIA32I32x4Ne:
+ case kIA32I32x4GtS:
+ case kIA32I32x4GeS:
case kSSEI32x4UConvertF32x4:
case kAVXI32x4UConvertF32x4:
case kIA32I32x4UConvertI16x8Low:
case kIA32I32x4UConvertI16x8High:
case kIA32I32x4ShrU:
- case kSSEI32x4MinU:
- case kAVXI32x4MinU:
- case kSSEI32x4MaxU:
- case kAVXI32x4MaxU:
+ case kIA32I32x4MinU:
+ case kIA32I32x4MaxU:
case kSSEI32x4GtU:
case kAVXI32x4GtU:
case kSSEI32x4GeU:
@@ -231,43 +204,28 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I16x8Neg:
case kIA32I16x8Shl:
case kIA32I16x8ShrS:
- case kSSEI16x8SConvertI32x4:
- case kAVXI16x8SConvertI32x4:
- case kSSEI16x8Add:
- case kAVXI16x8Add:
- case kSSEI16x8AddSatS:
- case kAVXI16x8AddSatS:
- case kSSEI16x8Sub:
- case kAVXI16x8Sub:
- case kSSEI16x8SubSatS:
- case kAVXI16x8SubSatS:
- case kSSEI16x8Mul:
- case kAVXI16x8Mul:
- case kSSEI16x8MinS:
- case kAVXI16x8MinS:
- case kSSEI16x8MaxS:
- case kAVXI16x8MaxS:
- case kSSEI16x8Eq:
- case kAVXI16x8Eq:
+ case kIA32I16x8SConvertI32x4:
+ case kIA32I16x8Add:
+ case kIA32I16x8AddSatS:
+ case kIA32I16x8Sub:
+ case kIA32I16x8SubSatS:
+ case kIA32I16x8Mul:
+ case kIA32I16x8MinS:
+ case kIA32I16x8MaxS:
+ case kIA32I16x8Eq:
case kSSEI16x8Ne:
case kAVXI16x8Ne:
- case kSSEI16x8GtS:
- case kAVXI16x8GtS:
+ case kIA32I16x8GtS:
case kSSEI16x8GeS:
case kAVXI16x8GeS:
case kIA32I16x8UConvertI8x16Low:
case kIA32I16x8UConvertI8x16High:
case kIA32I16x8ShrU:
- case kSSEI16x8UConvertI32x4:
- case kAVXI16x8UConvertI32x4:
- case kSSEI16x8AddSatU:
- case kAVXI16x8AddSatU:
- case kSSEI16x8SubSatU:
- case kAVXI16x8SubSatU:
- case kSSEI16x8MinU:
- case kAVXI16x8MinU:
- case kSSEI16x8MaxU:
- case kAVXI16x8MaxU:
+ case kIA32I16x8UConvertI32x4:
+ case kIA32I16x8AddSatU:
+ case kIA32I16x8SubSatU:
+ case kIA32I16x8MinU:
+ case kIA32I16x8MaxU:
case kSSEI16x8GtU:
case kAVXI16x8GtU:
case kSSEI16x8GeU:
@@ -290,8 +248,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Pextrb:
case kIA32Pextrw:
case kIA32S128Store32Lane:
- case kSSEI8x16SConvertI16x8:
- case kAVXI8x16SConvertI16x8:
+ case kIA32I8x16SConvertI16x8:
case kIA32I8x16Neg:
case kIA32I8x16Shl:
case kIA32I8x16ShrS:
@@ -307,8 +264,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I8x16GtS:
case kSSEI8x16GeS:
case kAVXI8x16GeS:
- case kSSEI8x16UConvertI16x8:
- case kAVXI8x16UConvertI16x8:
+ case kIA32I8x16UConvertI16x8:
case kIA32I8x16AddSatU:
case kIA32I8x16SubSatU:
case kIA32I8x16ShrU:
@@ -326,12 +282,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32S128Zero:
case kIA32S128AllOnes:
case kIA32S128Not:
- case kSSES128And:
- case kAVXS128And:
- case kSSES128Or:
- case kAVXS128Or:
- case kSSES128Xor:
- case kAVXS128Xor:
+ case kIA32S128And:
+ case kIA32S128Or:
+ case kIA32S128Xor:
case kIA32S128Select:
case kIA32S128AndNot:
case kIA32I8x16Swizzle:
@@ -423,7 +376,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Word32AtomicPairLoad:
return kIsLoadOperation;
- case kIA32Word32AtomicPairStore:
+ case kIA32Word32ReleasePairStore:
+ case kIA32Word32SeqCstPairStore:
case kIA32Word32AtomicPairAdd:
case kIA32Word32AtomicPairSub:
case kIA32Word32AtomicPairAnd:
@@ -447,7 +401,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for ia32 instructions. They have been determined
// in an empirical way.
switch (instr->arch_opcode()) {
- case kSSEFloat64Mul:
+ case kFloat64Mul:
return 5;
case kIA32Imul:
case kIA32ImulHigh:
@@ -455,18 +409,18 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kSSEFloat32Cmp:
case kSSEFloat64Cmp:
return 9;
- case kSSEFloat32Add:
- case kSSEFloat32Sub:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
- case kSSEFloat64Add:
- case kSSEFloat64Sub:
+ case kFloat32Add:
+ case kFloat32Sub:
+ case kFloat64Add:
+ case kFloat64Sub:
+ case kFloat32Abs:
+ case kFloat32Neg:
case kSSEFloat64Max:
case kSSEFloat64Min:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
+ case kFloat64Abs:
+ case kFloat64Neg:
return 5;
- case kSSEFloat32Mul:
+ case kFloat32Mul:
return 4;
case kSSEFloat32ToFloat64:
case kSSEFloat64ToFloat32:
@@ -484,9 +438,9 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return 33;
case kIA32Udiv:
return 26;
- case kSSEFloat32Div:
+ case kFloat32Div:
return 35;
- case kSSEFloat64Div:
+ case kFloat64Div:
return 63;
case kSSEFloat32Sqrt:
case kSSEFloat64Sqrt:
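The scheduler changes above are mostly renames, plus retargeting the empirical latency table at the unified opcodes. The sketch below shows the shape of that table as a plain switch; the cycle counts are the ones visible in the hunk, while the enum and function signature are simplified stand-ins rather than V8's InstructionScheduler interface.

// Illustrative latency table: scheduling only needs relative costs, so
// opcodes not listed explicitly share a cheap default.
#include <cstdio>

enum ArchOpcode { kFloat32Add, kFloat32Mul, kFloat32Div, kFloat64Div, kIA32Other };

int GetInstructionLatency(ArchOpcode opcode) {
  switch (opcode) {
    case kFloat32Add:
      return 5;   // add/sub/abs/neg class
    case kFloat32Mul:
      return 4;
    case kFloat32Div:
      return 35;
    case kFloat64Div:
      return 63;
    default:
      return 1;   // default for simple integer ops
  }
}

int main() {
  std::printf("Float64Div latency: %d cycles\n", GetInstructionLatency(kFloat64Div));
  return 0;
}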
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index f36fdb2935..ce792692f0 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -246,6 +246,41 @@ class IA32OperandGenerator final : public OperandGenerator {
namespace {
+ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
+ ArchOpcode opcode;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kIA32Movss;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kIA32Movsd;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kIA32Movl;
+ break;
+ case MachineRepresentation::kSimd128:
+ opcode = kIA32Movdqu;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+ return opcode;
+}
+
void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
Node* input = node->InputAt(0);
@@ -280,27 +315,24 @@ void VisitRR(InstructionSelector* selector, Node* node,
}
void VisitRROFloat(InstructionSelector* selector, Node* node,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
InstructionOperand operand1 = g.Use(node->InputAt(1));
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
+ selector->Emit(opcode, g.DefineAsRegister(node), operand0, operand1);
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
+ selector->Emit(opcode, g.DefineSameAsFirst(node), operand0, operand1);
}
}
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempSimd128Register()};
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), g.UseUnique(input),
- arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineAsRegister(node), g.Use(input));
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node),
- g.UseUniqueRegister(input), arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
}
}
@@ -329,7 +361,7 @@ void VisitRROSimd(InstructionSelector* selector, Node* node,
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
if (selector->IsSupported(AVX)) {
selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0,
- g.Use(node->InputAt(1)));
+ g.UseRegister(node->InputAt(1)));
} else {
selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0,
g.UseRegister(node->InputAt(1)));
@@ -389,14 +421,28 @@ void VisitRROSimdShift(InstructionSelector* selector, Node* node,
}
}
-void VisitRROI8x16SimdShift(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+void VisitI8x16Shift(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- selector->Emit(opcode, g.DefineSameAsFirst(node), operand0, operand1,
- arraysize(temps), temps);
+ InstructionOperand output = CpuFeatures::IsSupported(AVX)
+ ? g.UseRegister(node)
+ : g.DefineSameAsFirst(node);
+
+ if (g.CanBeImmediate(node->InputAt(1))) {
+ if (opcode == kIA32I8x16ShrS) {
+ selector->Emit(opcode, output, g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ } else {
+ InstructionOperand temps[] = {g.TempRegister()};
+ selector->Emit(opcode, output, g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)), arraysize(temps), temps);
+ }
+ } else {
+ InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
+ InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
+ selector->Emit(opcode, output, operand0, operand1, arraysize(temps), temps);
+ }
}
} // namespace
@@ -521,72 +567,110 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitLoad(Node* node, Node* value,
+ InstructionCode opcode) {
+ IA32OperandGenerator g(this);
+ InstructionOperand outputs[1];
+ outputs[0] = g.DefineAsRegister(node);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code = opcode | AddressingModeField::encode(mode);
+ Emit(code, 1, outputs, input_count, inputs);
+}
+
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ DCHECK(!load_rep.IsMapWord());
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
+}
- ArchOpcode opcode;
- switch (load_rep.representation()) {
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+namespace {
+
+ArchOpcode GetStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
case MachineRepresentation::kFloat32:
- opcode = kIA32Movss;
- break;
+ return kIA32Movss;
case MachineRepresentation::kFloat64:
- opcode = kIA32Movsd;
- break;
+ return kIA32Movsd;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
- break;
+ return kIA32Movb;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
- break;
+ return kIA32Movw;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
- opcode = kIA32Movl;
- break;
+ return kIA32Movl;
case MachineRepresentation::kSimd128:
- opcode = kIA32Movdqu;
- break;
+ return kIA32Movdqu;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
- case MachineRepresentation::kMapWord:
UNREACHABLE();
}
+}
- IA32OperandGenerator g(this);
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(node);
- InstructionOperand inputs[3];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- code |= AccessModeField::encode(kMemoryAccessPoisoned);
+ArchOpcode GetSeqCstStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ return kAtomicExchangeInt8;
+ case MachineRepresentation::kWord16:
+ return kAtomicExchangeInt16;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ return kAtomicExchangeWord32;
+ default:
+ UNREACHABLE();
}
- Emit(code, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, MachineRepresentation rep) {
+ IA32OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
-void InstructionSelector::VisitProtectedLoad(Node* node) {
- // TODO(eholk)
- UNIMPLEMENTED();
+ AddressingMode addressing_mode;
+ InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
+ ? g.UseFixed(value, edx)
+ : g.UseUniqueRegister(value);
+ InstructionOperand inputs[] = {
+ value_operand, g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {
+ (rep == MachineRepresentation::kWord8)
+ // Using DefineSameAsFirst requires the register to be unallocated.
+ ? g.DefineAsFixed(node, edx)
+ : g.DefineSameAsFirst(node)};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
}
-void InstructionSelector::VisitStore(Node* node) {
- IA32OperandGenerator g(this);
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ IA32OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
+ const bool is_seqcst =
+ atomic_order && *atomic_order == AtomicMemoryOrder::kSeqCst;
if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
@@ -603,48 +687,23 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t const temp_count = arraysize(temps);
- InstructionCode code = kArchStoreWithWriteBarrier;
+ InstructionCode code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
+ : kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count, temps);
+ selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count,
+ temps);
+ } else if (is_seqcst) {
+ VisitAtomicExchange(selector, node, GetSeqCstStoreOpcode(rep), rep);
} else {
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kFloat32:
- opcode = kIA32Movss;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kIA32Movsd;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = kIA32Movb;
- break;
- case MachineRepresentation::kWord16:
- opcode = kIA32Movw;
- break;
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord32:
- opcode = kIA32Movl;
- break;
- case MachineRepresentation::kSimd128:
- opcode = kIA32Movdqu;
- break;
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kMapWord: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- }
+ // Release and non-atomic stores emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
InstructionOperand val;
if (g.CanBeImmediate(value)) {
val = g.UseImmediate(value);
- } else if (rep == MachineRepresentation::kWord8 ||
- rep == MachineRepresentation::kBit) {
+ } else if (!atomic_order && (rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit)) {
val = g.UseByteRegister(value);
} else {
val = g.UseRegister(value);
@@ -655,13 +714,20 @@ void InstructionSelector::VisitStore(Node* node) {
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code =
- opcode | AddressingModeField::encode(addressing_mode);
+ GetStoreOpcode(rep) | AddressingModeField::encode(addressing_mode);
inputs[input_count++] = val;
- Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
- inputs);
+ selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
+ input_count, inputs);
}
}
+} // namespace
+
+void InstructionSelector::VisitStore(Node* node) {
+ VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1106,31 +1172,31 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(F64x2Trunc, kIA32F64x2Round | MiscField::encode(kRoundToZero)) \
V(F64x2NearestInt, kIA32F64x2Round | MiscField::encode(kRoundToNearest))
-#define RRO_FLOAT_OP_LIST(V) \
- V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \
- V(Float64Add, kAVXFloat64Add, kSSEFloat64Add) \
- V(Float32Sub, kAVXFloat32Sub, kSSEFloat32Sub) \
- V(Float64Sub, kAVXFloat64Sub, kSSEFloat64Sub) \
- V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \
- V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
- V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
- V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \
- V(F64x2Add, kIA32F64x2Add, kIA32F64x2Add) \
- V(F64x2Sub, kIA32F64x2Sub, kIA32F64x2Sub) \
- V(F64x2Mul, kIA32F64x2Mul, kIA32F64x2Mul) \
- V(F64x2Div, kIA32F64x2Div, kIA32F64x2Div) \
- V(F64x2Eq, kIA32F64x2Eq, kIA32F64x2Eq) \
- V(F64x2Ne, kIA32F64x2Ne, kIA32F64x2Ne) \
- V(F64x2Lt, kIA32F64x2Lt, kIA32F64x2Lt) \
- V(F64x2Le, kIA32F64x2Le, kIA32F64x2Le)
-
-#define FLOAT_UNOP_LIST(V) \
- V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
- V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
- V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \
- V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) \
- V(F64x2Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
- V(F64x2Neg, kAVXFloat64Neg, kSSEFloat64Neg)
+#define RRO_FLOAT_OP_LIST(V) \
+ V(Float32Add, kFloat32Add) \
+ V(Float64Add, kFloat64Add) \
+ V(Float32Sub, kFloat32Sub) \
+ V(Float64Sub, kFloat64Sub) \
+ V(Float32Mul, kFloat32Mul) \
+ V(Float64Mul, kFloat64Mul) \
+ V(Float32Div, kFloat32Div) \
+ V(Float64Div, kFloat64Div) \
+ V(F64x2Add, kIA32F64x2Add) \
+ V(F64x2Sub, kIA32F64x2Sub) \
+ V(F64x2Mul, kIA32F64x2Mul) \
+ V(F64x2Div, kIA32F64x2Div) \
+ V(F64x2Eq, kIA32F64x2Eq) \
+ V(F64x2Ne, kIA32F64x2Ne) \
+ V(F64x2Lt, kIA32F64x2Lt) \
+ V(F64x2Le, kIA32F64x2Le)
+
+#define FLOAT_UNOP_LIST(V) \
+ V(Float32Abs, kFloat32Abs) \
+ V(Float64Abs, kFloat64Abs) \
+ V(Float32Neg, kFloat32Neg) \
+ V(Float64Neg, kFloat64Neg) \
+ V(F64x2Abs, kFloat64Abs) \
+ V(F64x2Neg, kFloat64Neg)
#define RO_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1164,17 +1230,17 @@ RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
#undef RR_OP_LIST
-#define RRO_FLOAT_VISITOR(Name, avx, sse) \
+#define RRO_FLOAT_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
- VisitRROFloat(this, node, avx, sse); \
+ VisitRROFloat(this, node, opcode); \
}
RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
#undef RRO_FLOAT_VISITOR
#undef RRO_FLOAT_OP_LIST
-#define FLOAT_UNOP_VISITOR(Name, avx, sse) \
- void InstructionSelector::Visit##Name(Node* node) { \
- VisitFloatUnop(this, node, node->InputAt(0), avx, sse); \
+#define FLOAT_UNOP_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitFloatUnop(this, node, node->InputAt(0), opcode); \
}
FLOAT_UNOP_LIST(FLOAT_UNOP_VISITOR)
#undef FLOAT_UNOP_VISITOR
@@ -1617,29 +1683,6 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
VisitWordCompare(selector, node, kIA32Cmp, cont);
}
-void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode, MachineRepresentation rep) {
- IA32OperandGenerator g(selector);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
- AddressingMode addressing_mode;
- InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
- ? g.UseFixed(value, edx)
- : g.UseUniqueRegister(value);
- InstructionOperand inputs[] = {
- value_operand, g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- InstructionOperand outputs[] = {
- (rep == MachineRepresentation::kWord8)
- // Using DefineSameAsFirst requires the register to be unallocated.
- ? g.DefineAsFixed(node, edx)
- : g.DefineSameAsFirst(node)};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
-}
-
void VisitAtomicBinOp(InstructionSelector* selector, Node* node,
ArchOpcode opcode, MachineRepresentation rep) {
AddressingMode addressing_mode;
@@ -1949,32 +1992,25 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
load_rep.representation() == MachineRepresentation::kWord16 ||
- load_rep.representation() == MachineRepresentation::kWord32);
+ load_rep.representation() == MachineRepresentation::kWord32 ||
+ load_rep.representation() == MachineRepresentation::kTaggedSigned ||
+ load_rep.representation() == MachineRepresentation::kTaggedPointer ||
+ load_rep.representation() == MachineRepresentation::kTagged);
USE(load_rep);
- VisitLoad(node);
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- IA32OperandGenerator g(this);
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicExchangeInt8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicExchangeInt16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicExchangeWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicExchange(this, node, opcode, rep);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
@@ -1982,15 +2018,15 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2007,15 +2043,15 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2053,12 +2089,11 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
VisitAtomicBinOp(this, node, opcode, type.representation());
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2068,6 +2103,8 @@ VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
+ // Both acquire and sequentially consistent loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
IA32OperandGenerator g(this);
AddressingMode mode;
Node* base = node->InputAt(0);
@@ -2079,10 +2116,9 @@ void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
g.GetEffectiveIndexOperand(index, &mode)};
InstructionCode code =
kIA32Word32AtomicPairLoad | AddressingModeField::encode(mode);
- InstructionOperand temps[] = {g.TempDoubleRegister()};
InstructionOperand outputs[] = {g.DefineAsRegister(projection0),
g.DefineAsRegister(projection1)};
- Emit(code, 2, outputs, 2, inputs, 1, temps);
+ Emit(code, 2, outputs, 2, inputs);
} else if (projection0 || projection1) {
// Only one word is needed, so it's enough to load just that.
ArchOpcode opcode = kIA32Movl;
@@ -2103,25 +2139,45 @@ void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
}
void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
+ // Release pair stores emit a MOVQ via a double register, and sequentially
+ // consistent stores emit CMPXCHG8B.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
Node* value_high = node->InputAt(3);
- AddressingMode addressing_mode;
- InstructionOperand inputs[] = {
- g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
- g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- // Allocating temp registers here as stores are performed using an atomic
- // exchange, the output of which is stored in edx:eax, which should be saved
- // and restored at the end of the instruction.
- InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
- const int num_temps = arraysize(temps);
- InstructionCode code =
- kIA32Word32AtomicPairStore | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
+ AtomicMemoryOrder order = OpParameter<AtomicMemoryOrder>(node->op());
+ if (order == AtomicMemoryOrder::kAcqRel) {
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegisterOrSlotOrConstant(value),
+ g.UseUniqueRegisterOrSlotOrConstant(value_high),
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode),
+ };
+ InstructionCode code = kIA32Word32ReleasePairStore |
+ AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs);
+ } else {
+ DCHECK_EQ(order, AtomicMemoryOrder::kSeqCst);
+
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ // Allocating temp registers here as stores are performed using an atomic
+ // exchange, the output of which is stored in edx:eax, which should be saved
+ // and restored at the end of the instruction.
+ InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+ const int num_temps = arraysize(temps);
+ InstructionCode code = kIA32Word32SeqCstPairStore |
+ AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
+ }
}
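A rough sketch of the dispatch above (names as used in this file; this is not a separate helper in the patch), showing how the selector reads the memory order off the node and picks between the two new pair-store opcodes, following the usual C++11-to-x86 mapping cited in the comment:

  // Sketch only: x86-TSO never reorders a store with an older store, so a
  // release pair store can be a single 8-byte MOV through an XMM register,
  // while a seq-cst pair store must join the total store order and therefore
  // uses a LOCK CMPXCHG8B loop (edx:eax holds the compare value, hence the
  // fixed eax/edx temps above).
  AtomicMemoryOrder order = OpParameter<AtomicMemoryOrder>(node->op());
  ArchOpcode opcode = order == AtomicMemoryOrder::kAcqRel
                          ? kIA32Word32ReleasePairStore
                          : kIA32Word32SeqCstPairStore;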
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
@@ -2193,60 +2249,57 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#define SIMD_BINOP_LIST(V) \
V(F32x4Min) \
V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4MinS) \
- V(I32x4MaxS) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4MinU) \
- V(I32x4MaxU) \
V(I32x4GtU) \
V(I32x4GeU) \
- V(I16x8SConvertI32x4) \
- V(I16x8Add) \
- V(I16x8AddSatS) \
- V(I16x8Sub) \
- V(I16x8SubSatS) \
- V(I16x8Mul) \
- V(I16x8MinS) \
- V(I16x8MaxS) \
- V(I16x8Eq) \
V(I16x8Ne) \
- V(I16x8GtS) \
V(I16x8GeS) \
- V(I16x8AddSatU) \
- V(I16x8SubSatU) \
- V(I16x8MinU) \
- V(I16x8MaxU) \
V(I16x8GtU) \
V(I16x8GeU) \
- V(I8x16SConvertI16x8) \
V(I8x16Ne) \
V(I8x16GeS) \
V(I8x16GtU) \
- V(I8x16GeU) \
- V(S128And) \
- V(S128Or) \
- V(S128Xor)
+ V(I8x16GeU)
#define SIMD_BINOP_UNIFIED_SSE_AVX_LIST(V) \
V(F32x4Add) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Div) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Eq) \
V(I64x2Ne) \
+ V(I32x4Add) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4MinS) \
+ V(I32x4MaxS) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4MinU) \
+ V(I32x4MaxU) \
V(I32x4DotI16x8S) \
+ V(I16x8Add) \
+ V(I16x8AddSatS) \
+ V(I16x8Sub) \
+ V(I16x8SubSatS) \
+ V(I16x8Mul) \
+ V(I16x8Eq) \
+ V(I16x8GtS) \
+ V(I16x8MinS) \
+ V(I16x8MaxS) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
+ V(I16x8MinU) \
+ V(I16x8MaxU) \
+ V(I16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4) \
V(I16x8RoundingAverageU) \
V(I8x16Add) \
V(I8x16AddSatS) \
@@ -2260,7 +2313,12 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16SubSatU) \
V(I8x16MinU) \
V(I8x16MaxU) \
- V(I8x16RoundingAverageU)
+ V(I8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8) \
+ V(I8x16RoundingAverageU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor)
// These opcodes require all inputs to be registers because the codegen is
// simpler with all registers.
@@ -2462,7 +2520,12 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
}
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
- VisitRRSimd(this, node, kIA32I32x4SConvertF32x4);
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kIA32I32x4SConvertF32x4, dst, g.UseRegister(node->InputAt(0)),
+ arraysize(temps), temps);
}
void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
@@ -2625,26 +2688,6 @@ SIMD_BINOP_RRR(VISIT_SIMD_BINOP_RRR)
#undef VISIT_SIMD_BINOP_RRR
#undef SIMD_BINOP_RRR
-// TODO(v8:9198): SSE requires operand1 to be a register as we don't have memory
-// alignment yet. For AVX, memory operands are fine, but can have performance
-// issues if not aligned to 16/32 bytes (based on load size), see SDM Vol 1,
-// chapter 14.9
-void VisitPack(InstructionSelector* selector, Node* node, ArchOpcode avx_opcode,
- ArchOpcode sse_opcode) {
- IA32OperandGenerator g(selector);
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseRegister(node->InputAt(1));
- if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineSameAsFirst(node), operand0, operand1);
- } else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
- }
-}
-
-void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
- VisitPack(this, node, kAVXI16x8UConvertI32x4, kSSEI16x8UConvertI32x4);
-}
-
void InstructionSelector::VisitI16x8BitMask(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
@@ -2652,43 +2695,16 @@ void InstructionSelector::VisitI16x8BitMask(Node* node) {
g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
- VisitPack(this, node, kAVXI8x16UConvertI16x8, kSSEI8x16UConvertI16x8);
-}
-
void InstructionSelector::VisitI8x16Shl(Node* node) {
- IA32OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- this->Emit(kIA32I8x16Shl, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)),
- g.UseImmediate(node->InputAt(1)), arraysize(temps), temps);
- } else {
- VisitRROI8x16SimdShift(this, node, kIA32I8x16Shl);
- }
+ VisitI8x16Shift(this, node, kIA32I8x16Shl);
}
void InstructionSelector::VisitI8x16ShrS(Node* node) {
- IA32OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- this->Emit(kIA32I8x16ShrS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)),
- g.UseImmediate(node->InputAt(1)));
- } else {
- VisitRROI8x16SimdShift(this, node, kIA32I8x16ShrS);
- }
+ VisitI8x16Shift(this, node, kIA32I8x16ShrS);
}
void InstructionSelector::VisitI8x16ShrU(Node* node) {
- IA32OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- this->Emit(kIA32I8x16ShrU, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)),
- g.UseImmediate(node->InputAt(1)), arraysize(temps), temps);
- } else {
- VisitRROI8x16SimdShift(this, node, kIA32I8x16ShrU);
- }
+ VisitI8x16Shift(this, node, kIA32I8x16ShrU);
}
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 31d669813e..63cf3ca06f 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -17,6 +17,8 @@
#include "src/compiler/backend/mips/instruction-codes-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/compiler/backend/mips64/instruction-codes-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/compiler/backend/loong64/instruction-codes-loong64.h"
#elif V8_TARGET_ARCH_X64
#include "src/compiler/backend/x64/instruction-codes-x64.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
@@ -30,6 +32,7 @@
#define TARGET_ADDRESSING_MODE_LIST(V)
#endif
#include "src/base/bit-field.h"
+#include "src/codegen/atomic-memory-order.h"
#include "src/compiler/write-barrier-kind.h"
namespace v8 {
@@ -99,53 +102,53 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
V(ArchParentFramePointer) \
V(ArchTruncateDoubleToI) \
V(ArchStoreWithWriteBarrier) \
+ V(ArchAtomicStoreWithWriteBarrier) \
V(ArchStackSlot) \
- V(ArchWordPoisonOnSpeculation) \
V(ArchStackPointerGreaterThan) \
V(ArchStackCheckOffset) \
- V(Word32AtomicLoadInt8) \
- V(Word32AtomicLoadUint8) \
- V(Word32AtomicLoadInt16) \
- V(Word32AtomicLoadUint16) \
- V(Word32AtomicLoadWord32) \
- V(Word32AtomicStoreWord8) \
- V(Word32AtomicStoreWord16) \
- V(Word32AtomicStoreWord32) \
- V(Word32AtomicExchangeInt8) \
- V(Word32AtomicExchangeUint8) \
- V(Word32AtomicExchangeInt16) \
- V(Word32AtomicExchangeUint16) \
- V(Word32AtomicExchangeWord32) \
- V(Word32AtomicCompareExchangeInt8) \
- V(Word32AtomicCompareExchangeUint8) \
- V(Word32AtomicCompareExchangeInt16) \
- V(Word32AtomicCompareExchangeUint16) \
- V(Word32AtomicCompareExchangeWord32) \
- V(Word32AtomicAddInt8) \
- V(Word32AtomicAddUint8) \
- V(Word32AtomicAddInt16) \
- V(Word32AtomicAddUint16) \
- V(Word32AtomicAddWord32) \
- V(Word32AtomicSubInt8) \
- V(Word32AtomicSubUint8) \
- V(Word32AtomicSubInt16) \
- V(Word32AtomicSubUint16) \
- V(Word32AtomicSubWord32) \
- V(Word32AtomicAndInt8) \
- V(Word32AtomicAndUint8) \
- V(Word32AtomicAndInt16) \
- V(Word32AtomicAndUint16) \
- V(Word32AtomicAndWord32) \
- V(Word32AtomicOrInt8) \
- V(Word32AtomicOrUint8) \
- V(Word32AtomicOrInt16) \
- V(Word32AtomicOrUint16) \
- V(Word32AtomicOrWord32) \
- V(Word32AtomicXorInt8) \
- V(Word32AtomicXorUint8) \
- V(Word32AtomicXorInt16) \
- V(Word32AtomicXorUint16) \
- V(Word32AtomicXorWord32) \
+ V(AtomicLoadInt8) \
+ V(AtomicLoadUint8) \
+ V(AtomicLoadInt16) \
+ V(AtomicLoadUint16) \
+ V(AtomicLoadWord32) \
+ V(AtomicStoreWord8) \
+ V(AtomicStoreWord16) \
+ V(AtomicStoreWord32) \
+ V(AtomicExchangeInt8) \
+ V(AtomicExchangeUint8) \
+ V(AtomicExchangeInt16) \
+ V(AtomicExchangeUint16) \
+ V(AtomicExchangeWord32) \
+ V(AtomicCompareExchangeInt8) \
+ V(AtomicCompareExchangeUint8) \
+ V(AtomicCompareExchangeInt16) \
+ V(AtomicCompareExchangeUint16) \
+ V(AtomicCompareExchangeWord32) \
+ V(AtomicAddInt8) \
+ V(AtomicAddUint8) \
+ V(AtomicAddInt16) \
+ V(AtomicAddUint16) \
+ V(AtomicAddWord32) \
+ V(AtomicSubInt8) \
+ V(AtomicSubUint8) \
+ V(AtomicSubInt16) \
+ V(AtomicSubUint16) \
+ V(AtomicSubWord32) \
+ V(AtomicAndInt8) \
+ V(AtomicAndUint8) \
+ V(AtomicAndInt16) \
+ V(AtomicAndUint16) \
+ V(AtomicAndWord32) \
+ V(AtomicOrInt8) \
+ V(AtomicOrUint8) \
+ V(AtomicOrInt16) \
+ V(AtomicOrUint16) \
+ V(AtomicOrWord32) \
+ V(AtomicXorInt8) \
+ V(AtomicXorUint8) \
+ V(AtomicXorInt16) \
+ V(AtomicXorUint16) \
+ V(AtomicXorWord32) \
V(Ieee754Float64Acos) \
V(Ieee754Float64Acosh) \
V(Ieee754Float64Asin) \
@@ -208,12 +211,10 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
enum FlagsMode {
kFlags_none = 0,
kFlags_branch = 1,
- kFlags_branch_and_poison = 2,
- kFlags_deoptimize = 3,
- kFlags_deoptimize_and_poison = 4,
- kFlags_set = 5,
- kFlags_trap = 6,
- kFlags_select = 7,
+ kFlags_deoptimize = 2,
+ kFlags_set = 3,
+ kFlags_trap = 4,
+ kFlags_select = 5,
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
@@ -262,9 +263,20 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
enum MemoryAccessMode {
kMemoryAccessDirect = 0,
kMemoryAccessProtected = 1,
- kMemoryAccessPoisoned = 2
};
+enum class AtomicWidth { kWord32, kWord64 };
+
+inline size_t AtomicWidthSize(AtomicWidth width) {
+ switch (width) {
+ case AtomicWidth::kWord32:
+ return 4;
+ case AtomicWidth::kWord64:
+ return 8;
+ }
+ UNREACHABLE();
+}
+
// The InstructionCode is an opaque, target-specific integer that encodes
// what code to emit for an instruction in the code generator. It is not
// interesting to the register allocator, as the inputs and flags on the
@@ -279,6 +291,9 @@ using ArchOpcodeField = base::BitField<ArchOpcode, 0, 9>;
static_assert(ArchOpcodeField::is_valid(kLastArchOpcode),
"All opcodes must fit in the 9-bit ArchOpcodeField.");
using AddressingModeField = base::BitField<AddressingMode, 9, 5>;
+static_assert(
+ AddressingModeField::is_valid(kLastAddressingMode),
+ "All addressing modes must fit in the 5-bit AddressingModeField.");
using FlagsModeField = base::BitField<FlagsMode, 14, 3>;
using FlagsConditionField = base::BitField<FlagsCondition, 17, 5>;
using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
@@ -287,8 +302,29 @@ using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
// size, an access mode, or both inside the overlapping MiscField.
using LaneSizeField = base::BitField<int, 22, 8>;
using AccessModeField = base::BitField<MemoryAccessMode, 30, 2>;
+// AtomicWidthField overlaps with MiscField and is used for the various Atomic
+// opcodes. Only used on 64bit architectures. All atomic instructions on 32bit
+// architectures are assumed to be 32bit wide.
+using AtomicWidthField = base::BitField<AtomicWidth, 22, 2>;
+// AtomicMemoryOrderField overlaps with MiscField and is used for the various
+// Atomic opcodes. This field is not used on all architectures. It is used on
+// architectures where the codegen for kSeqCst and kAcqRel differ only by
+// emitting fences.
+using AtomicMemoryOrderField = base::BitField<AtomicMemoryOrder, 24, 2>;
+using AtomicStoreRecordWriteModeField = base::BitField<RecordWriteMode, 26, 4>;
using MiscField = base::BitField<int, 22, 10>;
+// This static assertion serves as an early warning if we are about to exhaust
+// the available opcode space. If we are about to exhaust it, we should start
+// looking into options to compress some opcodes (see
+// https://crbug.com/v8/12093) before we fully run out of available opcodes.
+// Otherwise we risk being unable to land an important security fix or merge
+// back fixes that add new opcodes.
+// It is OK to temporarily reduce the required slack if we have a tracking bug
+// to reduce the number of used opcodes again.
+static_assert(ArchOpcodeField::kMax - kLastArchOpcode >= 16,
+ "We are running close to the number of available opcodes.");
+
} // namespace compiler
} // namespace internal
} // namespace v8
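The new Atomic*Field bit fields compose into an InstructionCode the same way AddressingModeField already does in the selectors. A minimal, hypothetical example follows; the exact fields a backend encodes, and the addressing-mode name used here (kMode_MRI), are target-specific assumptions:

// Hypothetical encoding: a 32-bit atomic load used as part of a Word64 atomic
// operation, with seq-cst order. AtomicWidthField and AtomicMemoryOrderField
// overlay MiscField, so an instruction using them must not also carry a plain
// MiscField value.
InstructionCode code =
    kAtomicLoadWord32 | AddressingModeField::encode(kMode_MRI) |
    AtomicWidthField::encode(AtomicWidth::kWord64) |
    AtomicMemoryOrderField::encode(AtomicMemoryOrder::kSeqCst);
// The code generator recovers the pieces with the matching decode() calls,
// e.g. AtomicWidthField::decode(code).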
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index c46d263bae..bdad838f3e 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -132,7 +132,6 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
// We should not have branches in the middle of a block.
DCHECK_NE(instr->flags_mode(), kFlags_branch);
- DCHECK_NE(instr->flags_mode(), kFlags_branch_and_poison);
if (IsFixedRegisterParameter(instr)) {
if (last_live_in_reg_marker_ != nullptr) {
@@ -298,11 +297,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
// effects.
return kIsLoadOperation;
- case kArchWordPoisonOnSpeculation:
- // While poisoning operations have no side effect, they must not be
- // reordered relative to branches.
- return kHasSideEffect;
-
case kArchPrepareCallCFunction:
case kArchPrepareTailCall:
case kArchTailCallCodeObject:
@@ -334,55 +328,56 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
return kIsBarrier;
case kArchStoreWithWriteBarrier:
+ case kArchAtomicStoreWithWriteBarrier:
return kHasSideEffect;
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return kIsLoadOperation;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return kHasSideEffect;
- case kWord32AtomicExchangeInt8:
- case kWord32AtomicExchangeUint8:
- case kWord32AtomicExchangeInt16:
- case kWord32AtomicExchangeUint16:
- case kWord32AtomicExchangeWord32:
- case kWord32AtomicCompareExchangeInt8:
- case kWord32AtomicCompareExchangeUint8:
- case kWord32AtomicCompareExchangeInt16:
- case kWord32AtomicCompareExchangeUint16:
- case kWord32AtomicCompareExchangeWord32:
- case kWord32AtomicAddInt8:
- case kWord32AtomicAddUint8:
- case kWord32AtomicAddInt16:
- case kWord32AtomicAddUint16:
- case kWord32AtomicAddWord32:
- case kWord32AtomicSubInt8:
- case kWord32AtomicSubUint8:
- case kWord32AtomicSubInt16:
- case kWord32AtomicSubUint16:
- case kWord32AtomicSubWord32:
- case kWord32AtomicAndInt8:
- case kWord32AtomicAndUint8:
- case kWord32AtomicAndInt16:
- case kWord32AtomicAndUint16:
- case kWord32AtomicAndWord32:
- case kWord32AtomicOrInt8:
- case kWord32AtomicOrUint8:
- case kWord32AtomicOrInt16:
- case kWord32AtomicOrUint16:
- case kWord32AtomicOrWord32:
- case kWord32AtomicXorInt8:
- case kWord32AtomicXorUint8:
- case kWord32AtomicXorInt16:
- case kWord32AtomicXorUint16:
- case kWord32AtomicXorWord32:
+ case kAtomicExchangeInt8:
+ case kAtomicExchangeUint8:
+ case kAtomicExchangeInt16:
+ case kAtomicExchangeUint16:
+ case kAtomicExchangeWord32:
+ case kAtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeWord32:
+ case kAtomicAddInt8:
+ case kAtomicAddUint8:
+ case kAtomicAddInt16:
+ case kAtomicAddUint16:
+ case kAtomicAddWord32:
+ case kAtomicSubInt8:
+ case kAtomicSubUint8:
+ case kAtomicSubInt16:
+ case kAtomicSubUint16:
+ case kAtomicSubWord32:
+ case kAtomicAndInt8:
+ case kAtomicAndUint8:
+ case kAtomicAndInt16:
+ case kAtomicAndUint16:
+ case kAtomicAndWord32:
+ case kAtomicOrInt8:
+ case kAtomicOrUint8:
+ case kAtomicOrInt16:
+ case kAtomicOrUint16:
+ case kAtomicOrWord32:
+ case kAtomicXorInt8:
+ case kAtomicXorUint8:
+ case kAtomicXorInt16:
+ case kAtomicXorUint16:
+ case kAtomicXorWord32:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index f279ea1590..cd2b83ac3d 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -39,7 +39,7 @@ InstructionSelector::InstructionSelector(
size_t* max_pushed_argument_count, SourcePositionMode source_position_mode,
Features features, EnableScheduling enable_scheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing,
- PoisoningMitigationLevel poisoning_level, EnableTraceTurboJson trace_turbo)
+ EnableTraceTurboJson trace_turbo)
: zone_(zone),
linkage_(linkage),
sequence_(sequence),
@@ -63,7 +63,6 @@ InstructionSelector::InstructionSelector(
enable_roots_relative_addressing_(enable_roots_relative_addressing),
enable_switch_jump_table_(enable_switch_jump_table),
state_values_cache_(zone),
- poisoning_level_(poisoning_level),
frame_(frame),
instruction_selection_failed_(false),
instr_origins_(sequence->zone()),
@@ -1076,17 +1075,10 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
}
DCHECK_EQ(1u, buffer->instruction_args.size());
- // Argument 1 is used for poison-alias index (encoded in a word-sized
- // immediate. This an index of the operand that aliases with poison register
- // or -1 if there is no aliasing.
- buffer->instruction_args.push_back(g.TempImmediate(-1));
- const size_t poison_alias_index = 1;
- DCHECK_EQ(buffer->instruction_args.size() - 1, poison_alias_index);
-
// If the call needs a frame state, we insert the state information as
// follows (n is the number of value inputs to the frame state):
- // arg 2 : deoptimization id.
- // arg 3 - arg (n + 2) : value inputs to the frame state.
+ // arg 1 : deoptimization id.
+  // arg 2 - arg (n + 1) : value inputs to the frame state.
size_t frame_state_entries = 0;
USE(frame_state_entries); // frame_state_entries is only used for debug.
if (buffer->frame_state_descriptor != nullptr) {
@@ -1123,7 +1115,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
&buffer->instruction_args, FrameStateInputKind::kStackSlot,
instruction_zone());
- DCHECK_EQ(2 + frame_state_entries, buffer->instruction_args.size());
+ DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size());
}
size_t input_count = static_cast<size_t>(buffer->input_count());
@@ -1159,23 +1151,11 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
buffer->pushed_nodes[stack_index] = param;
pushed_count++;
} else {
- // If we do load poisoning and the linkage uses the poisoning register,
- // then we request the input in memory location, and during code
- // generation, we move the input to the register.
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison &&
- unallocated.HasFixedRegisterPolicy()) {
- int reg = unallocated.fixed_register_index();
- if (Register::from_code(reg) == kSpeculationPoisonRegister) {
- buffer->instruction_args[poison_alias_index] = g.TempImmediate(
- static_cast<int32_t>(buffer->instruction_args.size()));
- op = g.UseRegisterOrSlotOrConstant(*iter);
- }
- }
buffer->instruction_args.push_back(op);
}
}
DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
- frame_state_entries - 1);
+ frame_state_entries);
if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && is_tail_call &&
stack_param_delta != 0) {
// For tail calls that change the size of their parameter list and keep
@@ -1509,11 +1489,6 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitLoadLane(node);
}
- case IrOpcode::kPoisonedLoad: {
- LoadRepresentation type = LoadRepresentationOf(node->op());
- MarkAsRepresentation(type.representation(), node);
- return VisitPoisonedLoad(node);
- }
case IrOpcode::kStore:
return VisitStore(node);
case IrOpcode::kProtectedStore:
@@ -1850,12 +1825,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
- case IrOpcode::kTaggedPoisonOnSpeculation:
- return MarkAsTagged(node), VisitTaggedPoisonOnSpeculation(node);
- case IrOpcode::kWord32PoisonOnSpeculation:
- return MarkAsWord32(node), VisitWord32PoisonOnSpeculation(node);
- case IrOpcode::kWord64PoisonOnSpeculation:
- return MarkAsWord64(node), VisitWord64PoisonOnSpeculation(node);
case IrOpcode::kStackSlot:
return VisitStackSlot(node);
case IrOpcode::kStackPointerGreaterThan:
@@ -1900,12 +1869,14 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kMemoryBarrier:
return VisitMemoryBarrier(node);
case IrOpcode::kWord32AtomicLoad: {
- LoadRepresentation type = LoadRepresentationOf(node->op());
+ AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation type = params.representation();
MarkAsRepresentation(type.representation(), node);
return VisitWord32AtomicLoad(node);
}
case IrOpcode::kWord64AtomicLoad: {
- LoadRepresentation type = LoadRepresentationOf(node->op());
+ AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation type = params.representation();
MarkAsRepresentation(type.representation(), node);
return VisitWord64AtomicLoad(node);
}
@@ -2389,30 +2360,6 @@ void InstructionSelector::VisitNode(Node* node) {
}
}
-void InstructionSelector::EmitWordPoisonOnSpeculation(Node* node) {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- OperandGenerator g(this);
- Node* input_node = NodeProperties::GetValueInput(node, 0);
- InstructionOperand input = g.UseRegister(input_node);
- InstructionOperand output = g.DefineSameAsFirst(node);
- Emit(kArchWordPoisonOnSpeculation, output, input);
- } else {
- EmitIdentity(node);
- }
-}
-
-void InstructionSelector::VisitWord32PoisonOnSpeculation(Node* node) {
- EmitWordPoisonOnSpeculation(node);
-}
-
-void InstructionSelector::VisitWord64PoisonOnSpeculation(Node* node) {
- EmitWordPoisonOnSpeculation(node);
-}
-
-void InstructionSelector::VisitTaggedPoisonOnSpeculation(Node* node) {
- EmitWordPoisonOnSpeculation(node);
-}
-
void InstructionSelector::VisitStackPointerGreaterThan(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kStackPointerGreaterThanCondition, node);
@@ -2766,7 +2713,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
- !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_RISCV64
+ !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && \
+ !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
@@ -2792,7 +2740,7 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
}
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64
// !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 &&
- // !V8_TARGET_ARCH_RISCV64
+ // !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
// This is only needed on 32-bit to split the 64-bit value into two operands.
@@ -2806,11 +2754,12 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
#if !V8_TARGET_ARCH_ARM64
-#if !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 && !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 &&
+ // !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
@@ -3104,45 +3053,24 @@ void InstructionSelector::VisitReturn(Node* ret) {
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
- if (NeedsPoisoning(IsSafetyCheckOf(branch->op()))) {
- FlagsContinuation cont =
- FlagsContinuation::ForBranchAndPoison(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(branch, branch->InputAt(0), &cont);
- } else {
- FlagsContinuation cont =
- FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(branch, branch->InputAt(0), &cont);
- }
+ FlagsContinuation cont =
+ FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
+ VisitWordCompareZero(branch, branch->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- if (NeedsPoisoning(p.is_safety_check())) {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- } else {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- }
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
+ node->InputAt(1));
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- if (NeedsPoisoning(p.is_safety_check())) {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- } else {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- }
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, p.kind(), p.reason(), node->id(), p.feedback(), node->InputAt(1));
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitSelect(Node* node) {
@@ -3186,17 +3114,10 @@ void InstructionSelector::VisitDynamicCheckMapsWithDeoptUnless(Node* node) {
g.UseImmediate(n.slot()), g.UseImmediate(n.handler())});
}
- if (NeedsPoisoning(IsSafetyCheck::kCriticalSafetyCheck)) {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
- dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
- VisitWordCompareZero(node, n.condition(), &cont);
- } else {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
- dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
- VisitWordCompareZero(node, n.condition(), &cont);
- }
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
+ dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
+ VisitWordCompareZero(node, n.condition(), &cont);
}
void InstructionSelector::VisitTrapIf(Node* node, TrapId trap_id) {
@@ -3409,18 +3330,6 @@ void InstructionSelector::SwapShuffleInputs(Node* node) {
}
#endif // V8_ENABLE_WEBASSEMBLY
-// static
-bool InstructionSelector::NeedsPoisoning(IsSafetyCheck safety_check) const {
- switch (poisoning_level_) {
- case PoisoningMitigationLevel::kDontPoison:
- return false;
- case PoisoningMitigationLevel::kPoisonAll:
- return safety_check != IsSafetyCheck::kNoSafetyCheck;
- case PoisoningMitigationLevel::kPoisonCriticalOnly:
- return safety_check == IsSafetyCheck::kCriticalSafetyCheck;
- }
- UNREACHABLE();
-}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 11a329d1d6..b33de8e856 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -54,13 +54,6 @@ class FlagsContinuation final {
return FlagsContinuation(kFlags_branch, condition, true_block, false_block);
}
- static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
- BasicBlock* true_block,
- BasicBlock* false_block) {
- return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
- false_block);
- }
-
// Creates a new flags continuation for an eager deoptimization exit.
static FlagsContinuation ForDeoptimize(
FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
@@ -71,16 +64,6 @@ class FlagsContinuation final {
extra_args_count);
}
- // Creates a new flags continuation for an eager deoptimization exit.
- static FlagsContinuation ForDeoptimizeAndPoison(
- FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
- NodeId node_id, FeedbackSource const& feedback, Node* frame_state,
- InstructionOperand* extra_args = nullptr, int extra_args_count = 0) {
- return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind,
- reason, node_id, feedback, frame_state, extra_args,
- extra_args_count);
- }
-
// Creates a new flags continuation for a boolean value.
static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
return FlagsContinuation(condition, result);
@@ -98,16 +81,8 @@ class FlagsContinuation final {
}
bool IsNone() const { return mode_ == kFlags_none; }
- bool IsBranch() const {
- return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
- }
- bool IsDeoptimize() const {
- return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
- }
- bool IsPoisoned() const {
- return mode_ == kFlags_branch_and_poison ||
- mode_ == kFlags_deoptimize_and_poison;
- }
+ bool IsBranch() const { return mode_ == kFlags_branch; }
+ bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
bool IsSet() const { return mode_ == kFlags_set; }
bool IsTrap() const { return mode_ == kFlags_trap; }
bool IsSelect() const { return mode_ == kFlags_select; }
@@ -226,7 +201,7 @@ class FlagsContinuation final {
condition_(condition),
true_block_(true_block),
false_block_(false_block) {
- DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
+ DCHECK(mode == kFlags_branch);
DCHECK_NOT_NULL(true_block);
DCHECK_NOT_NULL(false_block);
}
@@ -245,7 +220,7 @@ class FlagsContinuation final {
frame_state_or_result_(frame_state),
extra_args_(extra_args),
extra_args_count_(extra_args_count) {
- DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
+ DCHECK(mode == kFlags_deoptimize);
DCHECK_NOT_NULL(frame_state);
}
@@ -338,8 +313,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
: kDisableScheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing =
kDisableRootsRelativeAddressing,
- PoisoningMitigationLevel poisoning_level =
- PoisoningMitigationLevel::kDontPoison,
EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);
// Visit code for the entire graph with the included schedule.
@@ -443,8 +416,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
- bool NeedsPoisoning(IsSafetyCheck safety_check) const;
-
// ===========================================================================
// ============ Architecture-independent graph covering methods. =============
// ===========================================================================
@@ -681,8 +652,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);
- void EmitWordPoisonOnSpeculation(Node* node);
-
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
const CallDescriptor* call_descriptor, Node* node);
void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
@@ -797,7 +766,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
FrameStateInput::Equal>
state_values_cache_;
- PoisoningMitigationLevel poisoning_level_;
Frame* frame_;
bool instruction_selection_failed_;
ZoneVector<std::pair<int, int>> instr_origins_;
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index 63ca78e060..0da8e054ae 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -410,12 +410,8 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
return os;
case kFlags_branch:
return os << "branch";
- case kFlags_branch_and_poison:
- return os << "branch_and_poison";
case kFlags_deoptimize:
return os << "deoptimize";
- case kFlags_deoptimize_and_poison:
- return os << "deoptimize_and_poison";
case kFlags_set:
return os << "set";
case kFlags_trap:
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 204683c973..8698ed8a98 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -935,8 +935,7 @@ class V8_EXPORT_PRIVATE Instruction final {
bool IsDeoptimizeCall() const {
return arch_opcode() == ArchOpcode::kArchDeoptimize ||
- FlagsModeField::decode(opcode()) == kFlags_deoptimize ||
- FlagsModeField::decode(opcode()) == kFlags_deoptimize_and_poison;
+ FlagsModeField::decode(opcode()) == kFlags_deoptimize;
}
bool IsTrap() const {
diff --git a/deps/v8/src/compiler/backend/jump-threading.cc b/deps/v8/src/compiler/backend/jump-threading.cc
index e91b7e17d2..258d05955e 100644
--- a/deps/v8/src/compiler/backend/jump-threading.cc
+++ b/deps/v8/src/compiler/backend/jump-threading.cc
@@ -55,17 +55,6 @@ struct JumpThreadingState {
RpoNumber onstack() { return RpoNumber::FromInt(-2); }
};
-bool IsBlockWithBranchPoisoning(InstructionSequence* code,
- InstructionBlock* block) {
- if (block->PredecessorCount() != 1) return false;
- RpoNumber pred_rpo = (block->predecessors())[0];
- const InstructionBlock* pred = code->InstructionBlockAt(pred_rpo);
- if (pred->code_start() == pred->code_end()) return false;
- Instruction* instr = code->InstructionAt(pred->code_end() - 1);
- FlagsMode mode = FlagsModeField::decode(instr->opcode());
- return mode == kFlags_branch_and_poison;
-}
-
} // namespace
bool JumpThreading::ComputeForwarding(Zone* local_zone,
@@ -92,85 +81,80 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
TRACE("jt [%d] B%d\n", static_cast<int>(stack.size()),
block->rpo_number().ToInt());
RpoNumber fw = block->rpo_number();
- if (!IsBlockWithBranchPoisoning(code, block)) {
- bool fallthru = true;
- for (int i = block->code_start(); i < block->code_end(); ++i) {
- Instruction* instr = code->InstructionAt(i);
- if (!instr->AreMovesRedundant()) {
- // can't skip instructions with non redundant moves.
- TRACE(" parallel move\n");
- fallthru = false;
- } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
- // can't skip instructions with flags continuations.
- TRACE(" flags\n");
- fallthru = false;
- } else if (instr->IsNop()) {
- // skip nops.
- TRACE(" nop\n");
- continue;
- } else if (instr->arch_opcode() == kArchJmp) {
- // try to forward the jump instruction.
- TRACE(" jmp\n");
- // if this block deconstructs the frame, we can't forward it.
- // TODO(mtrofin): we can still forward if we end up building
- // the frame at start. So we should move the decision of whether
- // to build a frame or not in the register allocator, and trickle it
- // here and to the code generator.
- if (frame_at_start || !(block->must_deconstruct_frame() ||
- block->must_construct_frame())) {
- fw = code->InputRpo(instr, 0);
- }
- fallthru = false;
- } else if (instr->IsRet()) {
- TRACE(" ret\n");
- if (fallthru) {
- CHECK_IMPLIES(block->must_construct_frame(),
- block->must_deconstruct_frame());
- // Only handle returns with immediate/constant operands, since
- // they must always be the same for all returns in a function.
- // Dynamic return values might use different registers at
- // different return sites and therefore cannot be shared.
- if (instr->InputAt(0)->IsImmediate()) {
- int32_t return_size = ImmediateOperand::cast(instr->InputAt(0))
- ->inline_int32_value();
- // Instructions can be shared only for blocks that share
- // the same |must_deconstruct_frame| attribute.
- if (block->must_deconstruct_frame()) {
- if (empty_deconstruct_frame_return_block ==
- RpoNumber::Invalid()) {
- empty_deconstruct_frame_return_block = block->rpo_number();
- empty_deconstruct_frame_return_size = return_size;
- } else if (empty_deconstruct_frame_return_size ==
- return_size) {
- fw = empty_deconstruct_frame_return_block;
- block->clear_must_deconstruct_frame();
- }
- } else {
- if (empty_no_deconstruct_frame_return_block ==
- RpoNumber::Invalid()) {
- empty_no_deconstruct_frame_return_block =
- block->rpo_number();
- empty_no_deconstruct_frame_return_size = return_size;
- } else if (empty_no_deconstruct_frame_return_size ==
- return_size) {
- fw = empty_no_deconstruct_frame_return_block;
- }
+ bool fallthru = true;
+ for (int i = block->code_start(); i < block->code_end(); ++i) {
+ Instruction* instr = code->InstructionAt(i);
+ if (!instr->AreMovesRedundant()) {
+ // can't skip instructions with non redundant moves.
+ TRACE(" parallel move\n");
+ fallthru = false;
+ } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ // can't skip instructions with flags continuations.
+ TRACE(" flags\n");
+ fallthru = false;
+ } else if (instr->IsNop()) {
+ // skip nops.
+ TRACE(" nop\n");
+ continue;
+ } else if (instr->arch_opcode() == kArchJmp) {
+ // try to forward the jump instruction.
+ TRACE(" jmp\n");
+ // if this block deconstructs the frame, we can't forward it.
+ // TODO(mtrofin): we can still forward if we end up building
+ // the frame at start. So we should move the decision of whether
+ // to build a frame or not in the register allocator, and trickle it
+ // here and to the code generator.
+ if (frame_at_start || !(block->must_deconstruct_frame() ||
+ block->must_construct_frame())) {
+ fw = code->InputRpo(instr, 0);
+ }
+ fallthru = false;
+ } else if (instr->IsRet()) {
+ TRACE(" ret\n");
+ if (fallthru) {
+ CHECK_IMPLIES(block->must_construct_frame(),
+ block->must_deconstruct_frame());
+ // Only handle returns with immediate/constant operands, since
+ // they must always be the same for all returns in a function.
+ // Dynamic return values might use different registers at
+ // different return sites and therefore cannot be shared.
+ if (instr->InputAt(0)->IsImmediate()) {
+ int32_t return_size = ImmediateOperand::cast(instr->InputAt(0))
+ ->inline_int32_value();
+ // Instructions can be shared only for blocks that share
+ // the same |must_deconstruct_frame| attribute.
+ if (block->must_deconstruct_frame()) {
+ if (empty_deconstruct_frame_return_block ==
+ RpoNumber::Invalid()) {
+ empty_deconstruct_frame_return_block = block->rpo_number();
+ empty_deconstruct_frame_return_size = return_size;
+ } else if (empty_deconstruct_frame_return_size == return_size) {
+ fw = empty_deconstruct_frame_return_block;
+ block->clear_must_deconstruct_frame();
+ }
+ } else {
+ if (empty_no_deconstruct_frame_return_block ==
+ RpoNumber::Invalid()) {
+ empty_no_deconstruct_frame_return_block = block->rpo_number();
+ empty_no_deconstruct_frame_return_size = return_size;
+ } else if (empty_no_deconstruct_frame_return_size ==
+ return_size) {
+ fw = empty_no_deconstruct_frame_return_block;
}
}
}
- fallthru = false;
- } else {
- // can't skip other instructions.
- TRACE(" other\n");
- fallthru = false;
}
- break;
- }
- if (fallthru) {
- int next = 1 + block->rpo_number().ToInt();
- if (next < code->InstructionBlockCount())
- fw = RpoNumber::FromInt(next);
+ fallthru = false;
+ } else {
+ // can't skip other instructions.
+ TRACE(" other\n");
+ fallthru = false;
}
+ break;
+ }
+ if (fallthru) {
+ int next = 1 + block->rpo_number().ToInt();
+ if (next < code->InstructionBlockCount()) fw = RpoNumber::FromInt(next);
}
state.Forward(fw);
}
@@ -225,7 +209,7 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code->InstructionAt(i);
FlagsMode mode = FlagsModeField::decode(instr->opcode());
- if (mode == kFlags_branch || mode == kFlags_branch_and_poison) {
+ if (mode == kFlags_branch) {
fallthru = false; // branches don't fall through to the next block.
} else if (instr->arch_opcode() == kArchJmp ||
instr->arch_opcode() == kArchRet) {
diff --git a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
new file mode 100644
index 0000000000..0397a36145
--- /dev/null
+++ b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
@@ -0,0 +1,2636 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/code-generator.h"
+#include "src/compiler/backend/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
+#include "src/heap/memory-chunk.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ tasm()->
+
+// TODO(LOONG_dev): consider renaming these macros.
+#define TRACE_MSG(msg) \
+ PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
+ __LINE__)
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED code_generator_loong64: %s at line %d\n", \
+ __FUNCTION__, __LINE__)
+
+// Adds Loong64-specific methods to convert InstructionOperands.
+class Loong64OperandConverter final : public InstructionOperandConverter {
+ public:
+ Loong64OperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ FloatRegister OutputSingleRegister(size_t index = 0) {
+ return ToSingleRegister(instr_->OutputAt(index));
+ }
+
+ FloatRegister InputSingleRegister(size_t index) {
+ return ToSingleRegister(instr_->InputAt(index));
+ }
+
+ FloatRegister ToSingleRegister(InstructionOperand* op) {
+ // Single (Float) and Double register namespace is same on LOONG64,
+ // both are typedefs of FPURegister.
+ return ToDoubleRegister(op);
+ }
+
+ Register InputOrZeroRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) {
+ DCHECK_EQ(0, InputInt32(index));
+ return zero_reg;
+ }
+ return InputRegister(index);
+ }
+
+ DoubleRegister InputOrZeroDoubleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister InputOrZeroSingleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputSingleRegister(index);
+ }
+
+ Operand InputImmediate(size_t index) {
+ Constant constant = ToConstant(instr_->InputAt(index));
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kInt64:
+ return Operand(constant.ToInt64());
+ case Constant::kFloat32:
+ return Operand::EmbeddedNumber(constant.ToFloat32());
+ case Constant::kFloat64:
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
+ case Constant::kExternalReference:
+ case Constant::kCompressedHeapObject:
+ case Constant::kHeapObject:
+ break;
+ case Constant::kDelayedStringConstant:
+ return Operand::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(titzer): RPO immediates on loong64?
+ }
+ UNREACHABLE();
+ }
+
+ Operand InputOperand(size_t index) {
+ InstructionOperand* op = instr_->InputAt(index);
+ if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ }
+ return InputImmediate(index);
+ }
+
+ MemOperand MemoryOperand(size_t* first_index) {
+ const size_t index = *first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_None:
+ break;
+ case kMode_Root:
+ *first_index += 1;
+ return MemOperand(kRootRegister, InputInt32(index));
+ case kMode_MRI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_MRR:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+ }
+ UNREACHABLE();
+ }
+
+ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
+
+ MemOperand ToMemOperand(InstructionOperand* op) const {
+ DCHECK_NOT_NULL(op);
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+ }
+
+ MemOperand SlotToMemOperand(int slot) const {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+ }
+};
+
+static inline bool HasRegisterInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsRegister();
+}
+
+namespace {
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand offset,
+ Register value, RecordWriteMode mode,
+ StubCallMode stub_mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ offset_(offset),
+ value_(value),
+ mode_(mode),
+#if V8_ENABLE_WEBASSEMBLY
+ stub_mode_(stub_mode),
+#endif // V8_ENABLE_WEBASSEMBLY
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {
+ }
+
+ void Generate() final {
+ __ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
+ SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
+ if (must_save_lr_) {
+ // We need to save and restore ra if the frame was elided.
+ __ Push(ra);
+ }
+ if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
+ __ CallEphemeronKeyBarrier(object_, offset_, save_fp_mode);
+#if V8_ENABLE_WEBASSEMBLY
+ } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ CallRecordWriteStubSaveRegisters(object_, offset_,
+ remembered_set_action, save_fp_mode,
+ StubCallMode::kCallWasmRuntimeStub);
+#endif // V8_ENABLE_WEBASSEMBLY
+ } else {
+ __ CallRecordWriteStubSaveRegisters(object_, offset_,
+ remembered_set_action, save_fp_mode);
+ }
+ if (must_save_lr_) {
+ __ Pop(ra);
+ }
+ }
+
+ private:
+ Register const object_;
+ Operand const offset_;
+ Register const value_;
+ RecordWriteMode const mode_;
+#if V8_ENABLE_WEBASSEMBLY
+ StubCallMode const stub_mode_;
+#endif // V8_ENABLE_WEBASSEMBLY
+ bool must_save_lr_;
+ Zone* zone_;
+};
+
+#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \
+ class ool_name final : public OutOfLineCode { \
+ public: \
+ ool_name(CodeGenerator* gen, T dst, T src1, T src2) \
+ : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
+ \
+ void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \
+ \
+ private: \
+ T const dst_; \
+ T const src1_; \
+ T const src2_; \
+ }
+
+CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister);
+
+#undef CREATE_OOL_CLASS
+
+Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedGreaterThanOrEqual:
+ return hs;
+ case kUnsignedLessThanOrEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+Condition FlagsConditionToConditionTst(FlagsCondition condition) {
+ switch (condition) {
+ case kNotEqual:
+ return ne;
+ case kEqual:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
+ switch (condition) {
+ case kOverflow:
+ return ne;
+ case kNotOverflow:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
+ FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ *predicate = true;
+ return CEQ;
+ case kNotEqual:
+ *predicate = false;
+ return CEQ;
+ case kUnsignedLessThan:
+ *predicate = true;
+ return CLT;
+ case kUnsignedGreaterThanOrEqual:
+ *predicate = false;
+ return CLT;
+ case kUnsignedLessThanOrEqual:
+ *predicate = true;
+ return CLE;
+ case kUnsignedGreaterThan:
+ *predicate = false;
+ return CLE;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ *predicate = true;
+ break;
+ default:
+ *predicate = true;
+ break;
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ dbar(0); \
+ __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
+ __ dbar(0); \
+ } while (0)
+
+// only use for sub_w and sub_d
+#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
+ do { \
+ Label binop; \
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ dbar(0); \
+ __ bind(&binop); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \
+ size, bin_instr, representation) \
+ do { \
+ Label binop; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(3), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(3))); \
+ __ slli_w(i.TempRegister(3), i.TempRegister(3), 3); \
+ __ dbar(0); \
+ __ bind(&binop); \
+ __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \
+ size, sign_extend); \
+ __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \
+ size); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size, representation) \
+ do { \
+ Label exchange; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ slli_w(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ dbar(0); \
+ __ bind(&exchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
+ store_conditional) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ dbar(0); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ mov(i.TempRegister(2), i.InputRegister(3)); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size, representation) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ slli_w(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ dbar(0); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size, \
+ sign_extend); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ dbar(0); \
+ } while (0)
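+// Sub-word compare-exchange: in addition to the bit-field handling above, the
+// expected value (input 2) is normalized in place with ExtractBits so that it
+// carries the same sign/zero extension as the value loaded from memory;
+// otherwise the equality check against the extracted old value could fail
+// spuriously for sign-extended narrow types. Note that this clobbers
+// i.InputRegister(2).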
+
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ UseScratchRegisterScope temps(tasm()); \
+ Register scratch = temps.Acquire(); \
+ __ PrepareCallCFunction(0, 2, scratch); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ UseScratchRegisterScope temps(tasm()); \
+ Register scratch = temps.Acquire(); \
+ __ PrepareCallCFunction(0, 1, scratch); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
+ } while (0)
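+// Both IEEE754 helpers open a MANUAL frame, let PrepareCallCFunction align
+// the stack using a scratch register, and then call the ieee754 runtime
+// routine with 0 integer and 1 (unop) or 2 (binop) double arguments. The
+// double operands and the result are assumed to travel in the standard C
+// floating-point argument/return registers, as arranged on the other ports.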
+
+#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \
+ do { \
+ __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1)); \
+ } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+}
+
+void CodeGenerator::AssemblePrepareTailCall() {
+ if (frame_access_state()->has_frame()) {
+ __ Ld_d(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ Ld_d(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
+
+namespace {
+
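+// Moves sp so that exactly {new_slot_above_sp} slots sit between sp and the
+// fixed part of the frame before a tail call. Growing always happens;
+// shrinking only when {allow_shrinkage} is set (the before-gap adjustment
+// below passes false, so the stack never shrinks before gap moves run).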
+void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+ FrameAccessState* state,
+ int new_slot_above_sp,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ tasm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ tasm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_slot_offset) {
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ first_unused_slot_offset, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_slot_offset) {
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ first_unused_slot_offset);
+}
+
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ ComputeCodeStartAddress(scratch);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
+ kJavaScriptCallCodeStartRegister, Operand(scratch));
+}
+
+// Check if the code object is marked for deoptimization. If it is, then it
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
+// to:
+// 1. read from memory the word that contains that bit, which can be found in
+// the flags in the referenced {CodeDataContainer} object;
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+ // 3. if it is not zero, jump to the builtin.
+void CodeGenerator::BailoutIfDeoptimized() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ Ld_d(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ Ld_w(scratch, FieldMemOperand(
+ scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET, ne, scratch, Operand(zero_reg));
+}
+
+// Assembles an instruction after register allocation, producing machine code.
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
+ Loong64OperandConverter i(this, instr);
+ InstructionCode opcode = instr->opcode();
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+ switch (arch_opcode) {
+ case kArchCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ CallCodeObject(reg);
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchCallBuiltinPointer: {
+ DCHECK(!instr->InputAt(0)->IsImmediate());
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ case kArchCallWasmFunction: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Call(wasm_code, constant.rmode());
+ } else {
+ __ Call(i.InputRegister(0));
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallWasm: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Jump(wasm_code, constant.rmode());
+ } else {
+ __ Jump(i.InputRegister(0));
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ case kArchTailCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ JumpCodeObject(reg);
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Jump(reg);
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Check the function's context matches the context argument.
+ __ Ld_d(scratch, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Assert(eq, AbortReason::kWrongFunctionContext, cp, Operand(scratch));
+ }
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld_d(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ CallCodeObject(a2);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, scratch);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ break;
+ }
+ case kArchSaveCallerRegisters: {
+ fp_mode_ =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
+ // kReturnRegister0 should have been saved before entering the stub.
+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ DCHECK(IsAligned(bytes, kSystemPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
+ DCHECK(!caller_registers_saved_);
+ caller_registers_saved_ = true;
+ break;
+ }
+ case kArchRestoreCallerRegisters: {
+ DCHECK(fp_mode_ ==
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
+ // Don't overwrite the returned value.
+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ DCHECK(caller_registers_saved_);
+ caller_registers_saved_ = false;
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall();
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+#if V8_ENABLE_WEBASSEMBLY
+ Label start_call;
+ bool isWasmCapiFunction =
+ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+ // Distance (in bytes) from start_call to the return address.
+ int offset = __ root_array_available() ? 36 : 80; // 9 or 20 instrs
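+ // (LoongArch instructions are 4 bytes, so 9 * 4 == 36 and 20 * 4 == 80; the
+ // CHECK_EQ against SizeOfCodeGeneratedSince below verifies the constant.)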
+#endif // V8_ENABLE_WEBASSEMBLY
+#if V8_HOST_ARCH_LOONG64
+ if (FLAG_debug_code) {
+ offset += 12; // see CallCFunction
+ }
+#endif
+#if V8_ENABLE_WEBASSEMBLY
+ if (isWasmCapiFunction) {
+ __ bind(&start_call);
+ __ pcaddi(t7, -4);
+ __ St_d(t7, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (isWasmCapiFunction) {
+ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
+ RecordSafepoint(instr->reference_map());
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ frame_access_state()->SetFrameAccessToDefault();
+ // Ideally, we should decrement SP delta to match the change of stack
+ // pointer in CallCFunction. However, for certain architectures (e.g.
+ // ARM), there may be more strict alignment requirement, causing old SP
+ // to be saved on the stack. In those cases, we can not calculate the SP
+ // delta statically.
+ frame_access_state()->ClearSPDelta();
+ if (caller_registers_saved_) {
+ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
+ // Here, we assume the sequence to be:
+ // kArchSaveCallerRegisters;
+ // kArchCallCFunction;
+ // kArchRestoreCallerRegisters;
+ int bytes =
+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
+ }
+ break;
+ }
+ case kArchJmp:
+ AssembleArchJump(i.InputRpo(0));
+ break;
+ case kArchBinarySearchSwitch:
+ AssembleArchBinarySearchSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
+ case kArchAbortCSAAssert:
+ DCHECK(i.InputRegister(0) == a0);
+ {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(tasm(), StackFrame::NONE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
+ }
+ __ stop();
+ break;
+ case kArchDebugBreak:
+ __ DebugBreak();
+ break;
+ case kArchComment:
+ __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
+ break;
+ case kArchNop:
+ case kArchThrowTerminator:
+ // don't emit code for nops.
+ break;
+ case kArchDeoptimize: {
+ DeoptimizationExit* exit =
+ BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore());
+ __ Branch(exit->label());
+ break;
+ }
+ case kArchRet:
+ AssembleReturn(instr->InputAt(0));
+ break;
+ case kArchStackPointerGreaterThan: {
+ Register lhs_register = sp;
+ uint32_t offset;
+ if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
+ lhs_register = i.TempRegister(1);
+ __ Sub_d(lhs_register, sp, offset);
+ }
+ __ Sltu(i.TempRegister(0), i.InputRegister(0), lhs_register);
+ break;
+ }
+ case kArchStackCheckOffset:
+ __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
+ break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), fp);
+ break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->has_frame()) {
+ __ Ld_d(i.OutputRegister(), MemOperand(fp, 0));
+ } else {
+ __ mov(i.OutputRegister(), fp);
+ }
+ break;
+ case kArchTruncateDoubleToI:
+ __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0), DetermineStubCallMode());
+ break;
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ AddressingMode addressing_mode =
+ AddressingModeField::decode(instr->opcode());
+ Register object = i.InputRegister(0);
+ Operand offset(zero_reg);
+ if (addressing_mode == kMode_MRI) {
+ offset = Operand(i.InputInt64(1));
+ } else {
+ DCHECK_EQ(addressing_mode, kMode_MRR);
+ offset = Operand(i.InputRegister(1));
+ }
+ Register value = i.InputRegister(2);
+
+ auto ool = zone()->New<OutOfLineRecordWrite>(
+ this, object, offset, value, mode, DetermineStubCallMode());
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ if (addressing_mode == kMode_MRI) {
+ __ St_d(value, MemOperand(object, i.InputInt64(1)));
+ } else {
+ DCHECK_EQ(addressing_mode, kMode_MRR);
+ __ St_d(value, MemOperand(object, i.InputRegister(1)));
+ }
+ } else {
+ DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
+ DCHECK_EQ(addressing_mode, kMode_MRI);
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ Add_d(scratch, object, Operand(i.InputInt64(1)));
+ __ amswap_db_d(zero_reg, value, scratch);
+ }
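+ // Write-barrier fast path: when the value might be a Smi (mode >
+ // kValueIsPointer) Smis skip the barrier entirely, and the out-of-line
+ // RecordWrite code is only entered if {object}'s page has the "pointers
+ // from here are interesting" flag set.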
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
+ __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
+ ne, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kArchStackSlot: {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ Register base_reg = offset.from_stack_pointer() ? sp : fp;
+ __ Add_d(i.OutputRegister(), base_reg, Operand(offset.offset()));
+ if (FLAG_debug_code) {
+ // Verify that the output register is properly aligned.
+ __ And(scratch, i.OutputRegister(), Operand(kSystemPointerSize - 1));
+ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, scratch,
+ Operand(zero_reg));
+ }
+ break;
+ }
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow:
+ ASSEMBLE_IEEE754_BINOP(pow);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
+ case kLoong64Add_w:
+ __ Add_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Add_d:
+ __ Add_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64AddOvf_d:
+ __ AddOverflow_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), t8);
+ break;
+ case kLoong64Sub_w:
+ __ Sub_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Sub_d:
+ __ Sub_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64SubOvf_d:
+ __ SubOverflow_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), t8);
+ break;
+ case kLoong64Mul_w:
+ __ Mul_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64MulOvf_w:
+ __ MulOverflow_w(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), t8);
+ break;
+ case kLoong64Mulh_w:
+ __ Mulh_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mulh_wu:
+ __ Mulh_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mulh_d:
+ __ Mulh_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Div_w:
+ __ Div_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Div_wu:
+ __ Div_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Mod_w:
+ __ Mod_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mod_wu:
+ __ Mod_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mul_d:
+ __ Mul_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Div_d:
+ __ Div_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Div_du:
+ __ Div_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Mod_d:
+ __ Mod_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mod_du:
+ __ Mod_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Alsl_d:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Alsl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2), t7);
+ break;
+ case kLoong64Alsl_w:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Alsl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2), t7);
+ break;
+ case kLoong64And:
+ case kLoong64And32:
+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Or:
+ case kLoong64Or32:
+ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Nor:
+ case kLoong64Nor32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ DCHECK_EQ(0, i.InputOperand(1).immediate());
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ }
+ break;
+ case kLoong64Xor:
+ case kLoong64Xor32:
+ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Clz_w:
+ __ clz_w(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Clz_d:
+ __ clz_d(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Sll_w:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ slli_w(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Srl_w:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ srl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srli_w(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Sra_w:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sra_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srai_w(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Bstrpick_w:
+ __ bstrpick_w(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ break;
+ case kLoong64Bstrins_w:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ bstrins_w(i.OutputRegister(), zero_reg,
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ } else {
+ __ bstrins_w(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ }
+ break;
+ case kLoong64Bstrpick_d: {
+ __ bstrpick_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ break;
+ }
+ case kLoong64Bstrins_d:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ bstrins_d(i.OutputRegister(), zero_reg,
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ } else {
+ __ bstrins_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ }
+ break;
+ case kLoong64Sll_d:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ slli_d(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Srl_d:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ srl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srli_d(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Sra_d:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sra_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srai_d(i.OutputRegister(), i.InputRegister(0), imm);
+ }
+ break;
+ case kLoong64Rotr_w:
+ __ Rotr_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Rotr_d:
+ __ Rotr_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Tst:
+ __ And(t8, i.InputRegister(0), i.InputOperand(1));
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kLoong64Cmp:
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kLoong64Mov:
+ // TODO(LOONG_dev): Should we combine mov/li, or use separate instr?
+ // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
+ if (HasRegisterInput(instr, 0)) {
+ __ mov(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ li(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+
+ case kLoong64Float32Cmp: {
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
+
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ CompareF32(left, right, cc);
+ } break;
+ case kLoong64Float32Add:
+ __ fadd_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Sub:
+ __ fsub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Mul:
+ __ fmul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Div:
+ __ fdiv_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Abs:
+ __ fabs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kLoong64Float32Neg:
+ __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kLoong64Float32Sqrt: {
+ __ fsqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32Min: {
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat32Min>(this, dst, src1, src2);
+ __ Float32Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float32Max: {
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat32Max>(this, dst, src1, src2);
+ __ Float32Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float64Cmp: {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ CompareF64(left, right, cc);
+ } break;
+ case kLoong64Float64Add:
+ __ fadd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Sub:
+ __ fsub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Mul:
+ // TODO(LOONG_dev): LOONG64 add special case: right op is -1.0, see arm
+ // port.
+ __ fmul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Div:
+ __ fdiv_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Mod: {
+ // TODO(turbofan): implement directly.
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
+ break;
+ }
+ case kLoong64Float64Abs:
+ __ fabs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64Neg:
+ __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64Sqrt: {
+ __ fsqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float64Min: {
+ FPURegister dst = i.OutputDoubleRegister();
+ FPURegister src1 = i.InputDoubleRegister(0);
+ FPURegister src2 = i.InputDoubleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat64Min>(this, dst, src1, src2);
+ __ Float64Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float64Max: {
+ FPURegister dst = i.OutputDoubleRegister();
+ FPURegister src1 = i.InputDoubleRegister(0);
+ FPURegister src2 = i.InputDoubleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat64Max>(this, dst, src1, src2);
+ __ Float64Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float64RoundDown: {
+ __ Floor_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundDown: {
+ __ Floor_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64RoundTruncate: {
+ __ Trunc_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundTruncate: {
+ __ Trunc_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64RoundUp: {
+ __ Ceil_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundUp: {
+ __ Ceil_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64RoundTiesEven: {
+ __ Round_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundTiesEven: {
+ __ Round_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64SilenceNaN:
+ __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64ToFloat32:
+ __ fcvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float32ToFloat64:
+ __ fcvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
+ break;
+ case kLoong64Int32ToFloat64: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_w(scratch, i.InputRegister(0));
+ __ ffint_d_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Int32ToFloat32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_w(scratch, i.InputRegister(0));
+ __ ffint_s_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Uint32ToFloat32: {
+ __ Ffint_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Int64ToFloat32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_d(scratch, i.InputRegister(0));
+ __ ffint_s_l(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Int64ToFloat64: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_d(scratch, i.InputRegister(0));
+ __ ffint_d_l(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Uint32ToFloat64: {
+ __ Ffint_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Uint64ToFloat64: {
+ __ Ffint_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Uint64ToFloat32: {
+ __ Ffint_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Float64ToInt32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ftintrz_w_d(scratch, i.InputDoubleRegister(0));
+ __ movfr2gr_s(i.OutputRegister(), scratch);
+ break;
+ }
+ case kLoong64Float32ToInt32: {
+ FPURegister scratch_d = kScratchDoubleReg;
+ bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
+ __ ftintrz_w_s(scratch_d, i.InputDoubleRegister(0));
+ __ movfr2gr_s(i.OutputRegister(), scratch_d);
+ if (set_overflow_to_min_i32) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+ // because INT32_MIN allows easier out-of-bounds detection.
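+ // The fixup below relies on wrap-around: if the conversion produced
+ // INT32_MAX, adding 1 wraps to INT32_MIN, the signed slt then yields 1,
+ // and adding that 1 back turns INT32_MAX into INT32_MIN. For in-range
+ // results the increment does not wrap, slt yields 0 and the value is
+ // left unchanged.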
+ __ addi_w(scratch, i.OutputRegister(), 1);
+ __ slt(scratch, scratch, i.OutputRegister());
+ __ add_w(i.OutputRegister(), i.OutputRegister(), scratch);
+ }
+ break;
+ }
+ case kLoong64Float32ToInt64: {
+ FPURegister scratch_d = kScratchDoubleReg;
+
+ bool load_status = instr->OutputCount() > 1;
+ // Other arches use round to zero here, so we follow.
+ __ ftintrz_l_s(scratch_d, i.InputDoubleRegister(0));
+ __ movfr2gr_d(i.OutputRegister(), scratch_d);
+ if (load_status) {
+ Register output2 = i.OutputRegister(1);
+ __ movfcsr2gr(output2, FCSR2);
+ // Check for overflow and NaNs.
+ __ And(output2, output2,
+ kFCSROverflowCauseMask | kFCSRInvalidOpCauseMask);
+ __ Slt(output2, zero_reg, output2);
+ __ xori(output2, output2, 1);
+ }
+ break;
+ }
+ case kLoong64Float64ToInt64: {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ FPURegister scratch_d = kScratchDoubleReg;
+
+ bool set_overflow_to_min_i64 = MiscField::decode(instr->opcode());
+ bool load_status = instr->OutputCount() > 1;
+ // Other arches use round to zero here, so we follow.
+ __ ftintrz_l_d(scratch_d, i.InputDoubleRegister(0));
+ __ movfr2gr_d(i.OutputRegister(0), scratch_d);
+ if (load_status) {
+ Register output2 = i.OutputRegister(1);
+ __ movfcsr2gr(output2, FCSR2);
+ // Check for overflow and NaNs.
+ __ And(output2, output2,
+ kFCSROverflowCauseMask | kFCSRInvalidOpCauseMask);
+ __ Slt(output2, zero_reg, output2);
+ __ xori(output2, output2, 1);
+ }
+ if (set_overflow_to_min_i64) {
+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
+ // because INT64_MIN allows easier out-of-bounds detection.
+ __ addi_d(scratch, i.OutputRegister(), 1);
+ __ slt(scratch, scratch, i.OutputRegister());
+ __ add_d(i.OutputRegister(), i.OutputRegister(), scratch);
+ }
+ break;
+ }
+ case kLoong64Float64ToUint32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ Ftintrz_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
+ break;
+ }
+ case kLoong64Float32ToUint32: {
+ FPURegister scratch = kScratchDoubleReg;
+ bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
+ __ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
+ if (set_overflow_to_min_i32) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+ // because 0 allows easier out-of-bounds detection.
+ __ addi_w(scratch, i.OutputRegister(), 1);
+ __ Movz(i.OutputRegister(), zero_reg, scratch);
+ }
+ break;
+ }
+ case kLoong64Float32ToUint64: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Ftintrz_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch,
+ result);
+ break;
+ }
+ case kLoong64Float64ToUint64: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Ftintrz_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), scratch,
+ result);
+ break;
+ }
+ case kLoong64BitcastDL:
+ __ movfr2gr_d(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64BitcastLD:
+ __ movgr2fr_d(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Float64ExtractLowWord32:
+ __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64ExtractHighWord32:
+ __ movfrh2gr_s(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64InsertLowWord32:
+ __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ case kLoong64Float64InsertHighWord32:
+ __ movgr2frh_w(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ // ... more basic instructions ...
+
+ case kLoong64Ext_w_b:
+ __ ext_w_b(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Ext_w_h:
+ __ ext_w_h(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Ld_bu:
+ __ Ld_bu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_b:
+ __ Ld_b(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64St_b:
+ __ St_b(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64Ld_hu:
+ __ Ld_hu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_h:
+ __ Ld_h(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64St_h:
+ __ St_h(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64Ld_w:
+ __ Ld_w(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_wu:
+ __ Ld_wu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_d:
+ __ Ld_d(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64St_w:
+ __ St_w(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64St_d:
+ __ St_d(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64Fld_s: {
+ __ Fld_s(i.OutputSingleRegister(), i.MemoryOperand());
+ break;
+ }
+ case kLoong64Fst_s: {
+ size_t index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ FPURegister ft = i.InputOrZeroSingleRegister(index);
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ Fst_s(ft, operand);
+ break;
+ }
+ case kLoong64Fld_d:
+ __ Fld_d(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Fst_d: {
+ FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ Fst_d(ft, i.MemoryOperand());
+ break;
+ }
+ case kLoong64Dbar: {
+ __ dbar(0);
+ break;
+ }
+ case kLoong64Push:
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Sub_d(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
+ } else {
+ __ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
+ break;
+ case kLoong64Peek: {
+ int reverse_slot = i.InputInt32(0);
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Fld_d(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else if (op->representation() == MachineRepresentation::kFloat32) {
+ __ Fld_s(i.OutputSingleRegister(0), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
+ abort();
+ }
+ } else {
+ __ Ld_d(i.OutputRegister(0), MemOperand(fp, offset));
+ }
+ break;
+ }
+ case kLoong64StackClaim: {
+ __ Sub_d(sp, sp, Operand(i.InputInt32(0)));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) /
+ kSystemPointerSize);
+ break;
+ }
+ case kLoong64Poke: {
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ __ St_d(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
+ break;
+ }
+ case kLoong64ByteSwap64: {
+ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8);
+ break;
+ }
+ case kLoong64ByteSwap32: {
+ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
+ break;
+ }
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_b);
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_bu);
+ break;
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_h);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_hu);
+ break;
+ case kAtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_w);
+ break;
+ case kLoong64Word64AtomicLoadUint32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_wu);
+ break;
+ case kLoong64Word64AtomicLoadUint64:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_d);
+ break;
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_b);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_h);
+ break;
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_w);
+ break;
+ case kLoong64StoreCompressTagged:
+ case kLoong64Word64AtomicStoreWord64:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_d);
+ break;
+ case kAtomicExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 8, 32);
+ break;
+ case kAtomicExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 8, 64);
+ break;
+ }
+ break;
+ case kAtomicExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 16, 32);
+ break;
+ case kAtomicExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 16, 64);
+ break;
+ }
+ break;
+ case kAtomicExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amswap_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 32, 64);
+ break;
+ }
+ break;
+ case kLoong64Word64AtomicExchangeUint64:
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amswap_db_d(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case kAtomicCompareExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 8, 32);
+ break;
+ case kAtomicCompareExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 8,
+ 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 8,
+ 64);
+ break;
+ }
+ break;
+ case kAtomicCompareExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 16, 32);
+ break;
+ case kAtomicCompareExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 16,
+ 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 16,
+ 64);
+ break;
+ }
+ break;
+ case kAtomicCompareExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ slli_w(i.InputRegister(2), i.InputRegister(2), 0);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_w, Sc_w);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 32,
+ 64);
+ break;
+ }
+ break;
+ case kLoong64Word64AtomicCompareExchangeUint64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_d, Sc_d);
+ break;
+ case kAtomicAddWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amadd_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Add_d, 64);
+ break;
+ }
+ break;
+ case kAtomicSubWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_BINOP(Ll_w, Sc_w, Sub_w);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Sub_d, 64);
+ break;
+ }
+ break;
+ case kAtomicAndWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amand_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, And, 64);
+ break;
+ }
+ break;
+ case kAtomicOrWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amor_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Or, 64);
+ break;
+ }
+ break;
+ case kAtomicXorWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amxor_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Xor, 64);
+ break;
+ }
+ break;
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 8, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 8, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 8, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 16, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 16, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 16, inst64, 64); \
+ break; \
+ } \
+ break;
+ ATOMIC_BINOP_CASE(Add, Add_w, Add_d)
+ ATOMIC_BINOP_CASE(Sub, Sub_w, Sub_d)
+ ATOMIC_BINOP_CASE(And, And, And)
+ ATOMIC_BINOP_CASE(Or, Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor, Xor)
+#undef ATOMIC_BINOP_CASE
+
+ case kLoong64Word64AtomicAddUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amadd_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+ case kLoong64Word64AtomicSubUint64:
+ ASSEMBLE_ATOMIC_BINOP(Ll_d, Sc_d, Sub_d);
+ break;
+ case kLoong64Word64AtomicAndUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amand_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+ case kLoong64Word64AtomicOrUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+ case kLoong64Word64AtomicXorUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amxor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+ case kLoong64S128Const:
+ case kLoong64S128Zero:
+ case kLoong64I32x4Splat:
+ case kLoong64I32x4ExtractLane:
+ case kLoong64I32x4Add:
+ case kLoong64I32x4ReplaceLane:
+ case kLoong64I32x4Sub:
+ case kLoong64F64x2Abs:
+ default:
+ break;
+ }
+ return kSuccess;
+}
+
+#define UNSUPPORTED_COND(opcode, condition) \
+ StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
+ << "\""; \
+ UNIMPLEMENTED();
+
+void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
+ Instruction* instr, FlagsCondition condition,
+ Label* tlabel, Label* flabel, bool fallthru) {
+#undef __
+#define __ tasm->
+ Loong64OperandConverter i(gen, instr);
+
+ Condition cc = kNoCondition;
+ // LOONG64 does not have condition code flags, so compare and branch are
+ // implemented differently than on other architectures. The compare
+ // operations emit loong64 pseudo-instructions, which are handled here by
+ // branch instructions that do the actual comparison. It is essential that
+ // the input registers of the compare pseudo-op are not modified before this
+ // branch op, as they are tested here.
+
+ if (instr->arch_opcode() == kLoong64Tst) {
+ cc = FlagsConditionToConditionTst(condition);
+ __ Branch(tlabel, cc, t8, Operand(zero_reg));
+ } else if (instr->arch_opcode() == kLoong64Add_d ||
+ instr->arch_opcode() == kLoong64Sub_d) {
+ UseScratchRegisterScope temps(tasm);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ cc = FlagsConditionToConditionOvf(condition);
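+ // Add_d/Sub_d are used here for checked 32-bit arithmetic, so the 64-bit
+ // result is valid only if bits 63..32 equal the sign-extension of bit 31.
+ // srai_d isolates the upper 32 bits, srai_w replicates the sign bit of the
+ // lower word, and the branch compares the two; they differ exactly when
+ // the 32-bit operation overflowed.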
+ __ srai_d(scratch, i.OutputRegister(), 32);
+ __ srai_w(scratch2, i.OutputRegister(), 31);
+ __ Branch(tlabel, cc, scratch2, Operand(scratch));
+ } else if (instr->arch_opcode() == kLoong64AddOvf_d ||
+ instr->arch_opcode() == kLoong64SubOvf_d) {
+ switch (condition) {
+ // Overflow occurs if overflow register is negative
+ case kOverflow:
+ __ Branch(tlabel, lt, t8, Operand(zero_reg));
+ break;
+ case kNotOverflow:
+ __ Branch(tlabel, ge, t8, Operand(zero_reg));
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ } else if (instr->arch_opcode() == kLoong64MulOvf_w) {
+ // Overflow occurs if overflow register is not zero
+ switch (condition) {
+ case kOverflow:
+ __ Branch(tlabel, ne, t8, Operand(zero_reg));
+ break;
+ case kNotOverflow:
+ __ Branch(tlabel, eq, t8, Operand(zero_reg));
+ break;
+ default:
+ UNSUPPORTED_COND(kLoong64MulOvf_w, condition);
+ }
+ } else if (instr->arch_opcode() == kLoong64Cmp) {
+ cc = FlagsConditionToConditionCmp(condition);
+ __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+ cc = FlagsConditionToConditionCmp(condition);
+ DCHECK((cc == ls) || (cc == hi));
+ if (cc == ls) {
+ __ xori(i.TempRegister(0), i.TempRegister(0), 1);
+ }
+ __ Branch(tlabel, ne, i.TempRegister(0), Operand(zero_reg));
+ } else if (instr->arch_opcode() == kLoong64Float32Cmp ||
+ instr->arch_opcode() == kLoong64Float64Cmp) {
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
+ if (predicate) {
+ __ BranchTrueF(tlabel);
+ } else {
+ __ BranchFalseF(tlabel);
+ }
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
+ instr->arch_opcode());
+ UNIMPLEMENTED();
+ }
+ if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
+#undef __
+#define __ tasm()->
+}
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+
+ AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
+ branch->fallthru);
+}
+
+#undef UNSUPPORTED_COND
+
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+ BranchInfo* branch) {
+ AssembleArchBranch(instr, branch);
+}
+
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+}
+
+#if V8_ENABLE_WEBASSEMBLY
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
+ void Generate() final {
+ Loong64OperandConverter i(gen_, instr_);
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
+ GenerateCallToTrap(trap_id);
+ }
+
+ private:
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(), 0);
+ __ LeaveFrame(StackFrame::WASM);
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
+ pop_count += (pop_count & 1); // align
+ __ Drop(pop_count);
+ __ Ret();
+ } else {
+ gen_->AssembleSourcePosition(instr_);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
+ ReferenceMap* reference_map =
+ gen_->zone()->New<ReferenceMap>(gen_->zone());
+ gen_->RecordSafepoint(reference_map);
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+ }
+ }
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ auto ool = zone()->New<OutOfLineTrap>(this, instr);
+ Label* tlabel = ool->entry();
+ AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ Loong64OperandConverter i(this, instr);
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ DCHECK_NE(0u, instr->OutputCount());
+ Register result = i.OutputRegister(instr->OutputCount() - 1);
+ Condition cc = kNoCondition;
+ // LOONG64 does not have condition code flags, so compare and branch are
+ // implemented differently than on other architectures. The compare
+ // operations emit loong64 pseudo-instructions, which are checked and
+ // handled here.
+
+ if (instr->arch_opcode() == kLoong64Tst) {
+ cc = FlagsConditionToConditionTst(condition);
+ if (cc == eq) {
+ __ Sltu(result, t8, 1);
+ } else {
+ __ Sltu(result, zero_reg, t8);
+ }
+ return;
+ } else if (instr->arch_opcode() == kLoong64Add_d ||
+ instr->arch_opcode() == kLoong64Sub_d) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ cc = FlagsConditionToConditionOvf(condition);
+ // The overflow check materializes 1 or 0 in the result register.
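+ // srli_d isolates the sign bit of the 64-bit result and srli_w the sign
+ // bit of its low 32 bits; xor-ing them yields 1 exactly when the 32-bit
+ // operation overflowed, and the xori below flips that for the
+ // not-overflow condition.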
+ __ srli_d(scratch, i.OutputRegister(), 63);
+ __ srli_w(result, i.OutputRegister(), 31);
+ __ xor_(result, scratch, result);
+ if (cc == eq) // Toggle result for not overflow.
+ __ xori(result, result, 1);
+ return;
+ } else if (instr->arch_opcode() == kLoong64AddOvf_d ||
+ instr->arch_opcode() == kLoong64SubOvf_d) {
+ // Overflow occurs if overflow register is negative
+ __ slt(result, t8, zero_reg);
+ } else if (instr->arch_opcode() == kLoong64MulOvf_w) {
+ // Overflow occurs if overflow register is not zero
+ __ Sgtu(result, t8, zero_reg);
+ } else if (instr->arch_opcode() == kLoong64Cmp) {
+ cc = FlagsConditionToConditionCmp(condition);
+ switch (cc) {
+ case eq:
+ case ne: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ if (instr->InputAt(1)->IsImmediate()) {
+ if (is_int12(-right.immediate())) {
+ if (right.immediate() == 0) {
+ if (cc == eq) {
+ __ Sltu(result, left, 1);
+ } else {
+ __ Sltu(result, zero_reg, left);
+ }
+ } else {
+ __ Add_d(result, left, Operand(-right.immediate()));
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } else {
+ __ Xor(result, left, Operand(right));
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } else {
+ __ Xor(result, left, right);
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } break;
+ case lt:
+ case ge: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Slt(result, left, right);
+ if (cc == ge) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case gt:
+ case le: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Slt(result, left, right);
+ if (cc == le) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case lo:
+ case hs: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Sltu(result, left, right);
+ if (cc == hs) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case hi:
+ case ls: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Sltu(result, left, right);
+ if (cc == ls) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ } else if (instr->arch_opcode() == kLoong64Float64Cmp ||
+ instr->arch_opcode() == kLoong64Float32Cmp) {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
+ {
+ __ movcf2gr(result, FCC0);
+ if (!predicate) {
+ __ xori(result, result, 1);
+ }
+ }
+ return;
+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+ cc = FlagsConditionToConditionCmp(condition);
+ DCHECK((cc == ls) || (cc == hi));
+ if (cc == ls) {
+ __ xori(i.OutputRegister(), i.TempRegister(0), 1);
+ }
+ return;
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
+ instr->arch_opcode());
+ TRACE_UNIMPL();
+ UNIMPLEMENTED();
+ }
+}
+
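+// Input 0 is the value being switched on, input 1 is the default block, and
+// the remaining inputs form (case value, target block) pairs that are
+// gathered below and handed to the shared binary-search emitter.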
+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
+ Loong64OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ std::vector<std::pair<int32_t, Label*>> cases;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
+ }
+ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
+ cases.data() + cases.size());
+}
+
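+// Input 0 is the zero-based case index and input 1 the default block. The
+// unsigned (hs) compare branches to the default when the index is out of
+// range; otherwise GenerateSwitchTable dispatches through a jump table built
+// from the remaining inputs.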
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ Loong64OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+
+ __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
+ __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
+ return GetLabel(i.InputRpo(index + 2));
+ });
+}
+
+void CodeGenerator::AssembleArchSelect(Instruction* instr,
+ FlagsCondition condition) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::FinishFrame(Frame* frame) {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ int count = base::bits::CountPopulation(saves_fpu);
+ DCHECK_EQ(kNumCalleeSavedFPU, count);
+ frame->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kSystemPointerSize));
+ }
+
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ int count = base::bits::CountPopulation(saves);
+ frame->AllocateSavedCalleeRegisterSlots(count);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ if (frame_access_state()->has_frame()) {
+ if (call_descriptor->IsCFunctionCall()) {
+#if V8_ENABLE_WEBASSEMBLY
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ Sub_d(sp, sp, Operand(kSystemPointerSize));
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
+ } else {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ }
+ } else if (call_descriptor->IsJSFunctionCall()) {
+ __ Prologue();
+ } else {
+ __ StubPrologue(info()->GetOutputStackFrameType());
+#if V8_ENABLE_WEBASSEMBLY
+ if (call_descriptor->IsWasmFunctionCall()) {
+ __ Push(kWasmInstanceRegister);
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
+ // Wasm import wrappers are passed a tuple in the place of the instance.
+ // Unpack the tuple into the instance and the target callable.
+ // This must be done here in the codegen because it cannot be expressed
+ // properly in the graph.
+ __ Ld_d(kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ Ld_d(kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ Sub_d(sp, sp, Operand(kSystemPointerSize));
+ }
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ }
+ }
+
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ required_slots -= osr_helper()->UnoptimizedFrameSlots();
+ }
+
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+
+ if (required_slots > 0) {
+ DCHECK(frame_access_state()->has_frame());
+#if V8_ENABLE_WEBASSEMBLY
+ if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ Ld_d(scratch, FieldMemOperand(
+ kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ __ Ld_d(scratch, MemOperand(scratch, 0));
+ __ Add_d(scratch, scratch,
+ Operand(required_slots * kSystemPointerSize));
+ __ Branch(&done, uge, sp, Operand(scratch));
+ }
+
+ __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call does not return, hence we can ignore any references and just
+ // define an empty safepoint.
+ ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
+ RecordSafepoint(reference_map);
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+
+ __ bind(&done);
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ }
+
+ const int returns = frame()->GetReturnSlotCount();
+
+ // Skip callee-saved and return slots, which are pushed below.
+ required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= base::bits::CountPopulation(saves_fpu);
+ required_slots -= returns;
+ if (required_slots > 0) {
+ __ Sub_d(sp, sp, Operand(required_slots * kSystemPointerSize));
+ }
+
+ if (saves_fpu != 0) {
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(saves_fpu);
+ DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu));
+ }
+
+ if (saves != 0) {
+ // Save callee-saved registers.
+ __ MultiPush(saves);
+ }
+
+ if (returns != 0) {
+ // Create space for returns.
+ __ Sub_d(sp, sp, Operand(returns * kSystemPointerSize));
+ }
+}
+
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ Add_d(sp, sp, Operand(returns * kSystemPointerSize));
+ }
+
+ // Restore GP registers.
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+
+ // Restore FPU registers.
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ __ MultiPopFPU(saves_fpu);
+ }
+
+ Loong64OperandConverter g(this, nullptr);
+
+ const int parameter_slots =
+ static_cast<int>(call_descriptor->ParameterSlotCount());
+
+  // {additional_pop_count} is only greater than zero if {parameter_slots == 0}.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_slots != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (FLAG_debug_code) {
+ __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
+ g.ToRegister(additional_pop_count),
+ Operand(static_cast<int64_t>(0)));
+ }
+ }
+
+ // Functions with JS linkage have at least one parameter (the receiver).
+ // If {parameter_slots} == 0, it means it is a builtin with
+ // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
+ // itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_slots != 0;
+
+ if (call_descriptor->IsCFunctionCall()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
+    // Canonicalize JSFunction return sites for now unless they have a variable
+ // number of stack slot pops.
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ }
+ }
+ if (drop_jsargs) {
+ // Get the actual argument count
+ __ Ld_d(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
+ }
+ if (drop_jsargs) {
+ // We must pop all arguments from the stack (including the receiver). This
+    // number of arguments is given by max(1 + argc_reg, parameter_slots).
+ __ Add_d(t0, t0, Operand(1)); // Also pop the receiver.
+ if (parameter_slots > 1) {
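+      // t0 = max(t0, parameter_slots).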
+ __ li(t1, parameter_slots);
+ __ slt(t2, t0, t1);
+ __ Movn(t0, t1, t2);
+ }
+ __ slli_d(t0, t0, kSystemPointerSizeLog2);
+ __ add_d(sp, sp, t0);
+ } else if (additional_pop_count->IsImmediate()) {
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ __ Drop(parameter_slots + additional_count);
+ } else {
+ Register pop_reg = g.ToRegister(additional_pop_count);
+ __ Drop(parameter_slots);
+ __ slli_d(pop_reg, pop_reg, kSystemPointerSizeLog2);
+ __ add_d(sp, sp, pop_reg);
+ }
+ __ Ret();
+}
+
+void CodeGenerator::FinishCode() {}
+
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ Loong64OperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mov(g.ToRegister(destination), src);
+ } else {
+ __ St_d(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ Ld_d(g.ToRegister(destination), src);
+ } else {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ Ld_d(scratch, src);
+ __ St_d(scratch, g.ToMemOperand(destination));
+ }
+ } else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ Register dst =
+ destination->IsRegister() ? g.ToRegister(destination) : scratch;
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ li(dst, Operand(src.ToInt32()));
+ break;
+ case Constant::kFloat32:
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
+ break;
+ case Constant::kInt64:
+#if V8_ENABLE_WEBASSEMBLY
+ if (RelocInfo::IsWasmReference(src.rmode()))
+ __ li(dst, Operand(src.ToInt64(), src.rmode()));
+ else
+#endif // V8_ENABLE_WEBASSEMBLY
+ __ li(dst, Operand(src.ToInt64()));
+ break;
+ case Constant::kFloat64:
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
+ break;
+ case Constant::kExternalReference:
+ __ li(dst, src.ToExternalReference());
+ break;
+ case Constant::kDelayedStringConstant:
+ __ li(dst, src.ToDelayedStringConstant());
+ break;
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ li(dst, src_object);
+ }
+ break;
+ }
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(titzer): loading RPO numbers on LOONG64.
+ }
+ if (destination->IsStackSlot()) __ St_d(dst, g.ToMemOperand(destination));
+ } else if (src.type() == Constant::kFloat32) {
+ if (destination->IsFPStackSlot()) {
+ MemOperand dst = g.ToMemOperand(destination);
+ if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+ __ St_d(zero_reg, dst);
+ } else {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ li(scratch, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ St_d(scratch, dst);
+ }
+ } else {
+ DCHECK(destination->IsFPRegister());
+ FloatRegister dst = g.ToSingleRegister(destination);
+ __ Move(dst, src.ToFloat32());
+ }
+ } else {
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ DoubleRegister dst = destination->IsFPRegister()
+ ? g.ToDoubleRegister(destination)
+ : kScratchDoubleReg;
+ __ Move(dst, src.ToFloat64().value());
+ if (destination->IsFPStackSlot()) {
+ __ Fst_d(dst, g.ToMemOperand(destination));
+ }
+ }
+ } else if (source->IsFPRegister()) {
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ __ Fst_d(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsFPRegister()) {
+ __ Fld_d(g.ToDoubleRegister(destination), src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ FPURegister temp = kScratchDoubleReg;
+ __ Fld_d(temp, src);
+ __ Fst_d(temp, g.ToMemOperand(destination));
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ Loong64OperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Register-register.
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ Move(scratch, src);
+ __ Move(src, dst);
+ __ Move(dst, scratch);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ mov(scratch, src);
+ __ Ld_d(src, dst);
+ __ St_d(scratch, dst);
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsStackSlot());
+    // TODO(LOONG_dev): LOONG64 Optimize scratch registers usage
+    // Since the Ld instruction may itself need a scratch register, we should
+    // not acquire both scratch registers from UseScratchRegisterScope here.
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ FPURegister scratch_d = kScratchDoubleReg;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
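+  // Hold the destination value in the FPU scratch register while the GPR
+  // scratch register copies the source value into place.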
+ __ Ld_d(scratch, src);
+ __ Fld_d(scratch_d, dst);
+ __ St_d(scratch, dst);
+ __ Fst_d(scratch_d, src);
+ } else if (source->IsFPRegister()) {
+ FPURegister scratch_d = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(scratch_d, src);
+ __ Move(src, dst);
+ __ Move(dst, scratch_d);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(scratch_d, src);
+ __ Fld_d(src, dst);
+ __ Fst_d(scratch_d, dst);
+ }
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ MemOperand src0 = g.ToMemOperand(source);
+ MemOperand src1(src0.base(), src0.offset() + kIntSize);
+ MemOperand dst0 = g.ToMemOperand(destination);
+ MemOperand dst1(dst0.base(), dst0.offset() + kIntSize);
+ FPURegister scratch_d = kScratchDoubleReg;
+    __ Fld_d(scratch_d, dst0);  // Save destination in scratch_d.
+ __ Ld_w(scratch, src0); // Then use scratch to copy source to destination.
+ __ St_w(scratch, dst0);
+ __ Ld_w(scratch, src1);
+ __ St_w(scratch, dst1);
+ __ Fst_d(scratch_d, src0);
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+  // On LOONG64 we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
+#undef ASSEMBLE_ATOMIC_STORE_INTEGER
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_ATOMIC_BINOP_EXT
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+
+#undef TRACE_MSG
+#undef TRACE_UNIMPL
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
new file mode 100644
index 0000000000..f31818cac2
--- /dev/null
+++ b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
@@ -0,0 +1,397 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
+#define V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// LOONG64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Loong64Add_d) \
+ V(Loong64Add_w) \
+ V(Loong64AddOvf_d) \
+ V(Loong64Sub_d) \
+ V(Loong64Sub_w) \
+ V(Loong64SubOvf_d) \
+ V(Loong64Mul_d) \
+ V(Loong64MulOvf_w) \
+ V(Loong64Mulh_d) \
+ V(Loong64Mulh_w) \
+ V(Loong64Mulh_wu) \
+ V(Loong64Mul_w) \
+ V(Loong64Div_d) \
+ V(Loong64Div_w) \
+ V(Loong64Div_du) \
+ V(Loong64Div_wu) \
+ V(Loong64Mod_d) \
+ V(Loong64Mod_w) \
+ V(Loong64Mod_du) \
+ V(Loong64Mod_wu) \
+ V(Loong64And) \
+ V(Loong64And32) \
+ V(Loong64Or) \
+ V(Loong64Or32) \
+ V(Loong64Nor) \
+ V(Loong64Nor32) \
+ V(Loong64Xor) \
+ V(Loong64Xor32) \
+ V(Loong64Alsl_d) \
+ V(Loong64Alsl_w) \
+ V(Loong64Sll_d) \
+ V(Loong64Sll_w) \
+ V(Loong64Srl_d) \
+ V(Loong64Srl_w) \
+ V(Loong64Sra_d) \
+ V(Loong64Sra_w) \
+ V(Loong64Rotr_d) \
+ V(Loong64Rotr_w) \
+ V(Loong64Bstrpick_d) \
+ V(Loong64Bstrpick_w) \
+ V(Loong64Bstrins_d) \
+ V(Loong64Bstrins_w) \
+ V(Loong64ByteSwap64) \
+ V(Loong64ByteSwap32) \
+ V(Loong64Clz_d) \
+ V(Loong64Clz_w) \
+ V(Loong64Mov) \
+ V(Loong64Tst) \
+ V(Loong64Cmp) \
+ V(Loong64Float32Cmp) \
+ V(Loong64Float32Add) \
+ V(Loong64Float32Sub) \
+ V(Loong64Float32Mul) \
+ V(Loong64Float32Div) \
+ V(Loong64Float32Abs) \
+ V(Loong64Float32Neg) \
+ V(Loong64Float32Sqrt) \
+ V(Loong64Float32Max) \
+ V(Loong64Float32Min) \
+ V(Loong64Float32ToFloat64) \
+ V(Loong64Float32RoundDown) \
+ V(Loong64Float32RoundUp) \
+ V(Loong64Float32RoundTruncate) \
+ V(Loong64Float32RoundTiesEven) \
+ V(Loong64Float32ToInt32) \
+ V(Loong64Float32ToInt64) \
+ V(Loong64Float32ToUint32) \
+ V(Loong64Float32ToUint64) \
+ V(Loong64Float64Cmp) \
+ V(Loong64Float64Add) \
+ V(Loong64Float64Sub) \
+ V(Loong64Float64Mul) \
+ V(Loong64Float64Div) \
+ V(Loong64Float64Mod) \
+ V(Loong64Float64Abs) \
+ V(Loong64Float64Neg) \
+ V(Loong64Float64Sqrt) \
+ V(Loong64Float64Max) \
+ V(Loong64Float64Min) \
+ V(Loong64Float64ToFloat32) \
+ V(Loong64Float64RoundDown) \
+ V(Loong64Float64RoundUp) \
+ V(Loong64Float64RoundTruncate) \
+ V(Loong64Float64RoundTiesEven) \
+ V(Loong64Float64ToInt32) \
+ V(Loong64Float64ToInt64) \
+ V(Loong64Float64ToUint32) \
+ V(Loong64Float64ToUint64) \
+ V(Loong64Int32ToFloat32) \
+ V(Loong64Int32ToFloat64) \
+ V(Loong64Int64ToFloat32) \
+ V(Loong64Int64ToFloat64) \
+ V(Loong64Uint32ToFloat32) \
+ V(Loong64Uint32ToFloat64) \
+ V(Loong64Uint64ToFloat32) \
+ V(Loong64Uint64ToFloat64) \
+ V(Loong64Float64ExtractLowWord32) \
+ V(Loong64Float64ExtractHighWord32) \
+ V(Loong64Float64InsertLowWord32) \
+ V(Loong64Float64InsertHighWord32) \
+ V(Loong64BitcastDL) \
+ V(Loong64BitcastLD) \
+ V(Loong64Float64SilenceNaN) \
+ V(Loong64Ld_b) \
+ V(Loong64Ld_bu) \
+ V(Loong64St_b) \
+ V(Loong64Ld_h) \
+ V(Loong64Ld_hu) \
+ V(Loong64St_h) \
+ V(Loong64Ld_w) \
+ V(Loong64Ld_wu) \
+ V(Loong64St_w) \
+ V(Loong64Ld_d) \
+ V(Loong64St_d) \
+ V(Loong64Fld_s) \
+ V(Loong64Fst_s) \
+ V(Loong64Fld_d) \
+ V(Loong64Fst_d) \
+ V(Loong64Push) \
+ V(Loong64Peek) \
+ V(Loong64Poke) \
+ V(Loong64StackClaim) \
+ V(Loong64Ext_w_b) \
+ V(Loong64Ext_w_h) \
+ V(Loong64Dbar) \
+ V(Loong64S128Const) \
+ V(Loong64S128Zero) \
+ V(Loong64S128AllOnes) \
+ V(Loong64I32x4Splat) \
+ V(Loong64I32x4ExtractLane) \
+ V(Loong64I32x4ReplaceLane) \
+ V(Loong64I32x4Add) \
+ V(Loong64I32x4Sub) \
+ V(Loong64F64x2Abs) \
+ V(Loong64F64x2Neg) \
+ V(Loong64F32x4Splat) \
+ V(Loong64F32x4ExtractLane) \
+ V(Loong64F32x4ReplaceLane) \
+ V(Loong64F32x4SConvertI32x4) \
+ V(Loong64F32x4UConvertI32x4) \
+ V(Loong64I32x4Mul) \
+ V(Loong64I32x4MaxS) \
+ V(Loong64I32x4MinS) \
+ V(Loong64I32x4Eq) \
+ V(Loong64I32x4Ne) \
+ V(Loong64I32x4Shl) \
+ V(Loong64I32x4ShrS) \
+ V(Loong64I32x4ShrU) \
+ V(Loong64I32x4MaxU) \
+ V(Loong64I32x4MinU) \
+ V(Loong64F64x2Sqrt) \
+ V(Loong64F64x2Add) \
+ V(Loong64F64x2Sub) \
+ V(Loong64F64x2Mul) \
+ V(Loong64F64x2Div) \
+ V(Loong64F64x2Min) \
+ V(Loong64F64x2Max) \
+ V(Loong64F64x2Eq) \
+ V(Loong64F64x2Ne) \
+ V(Loong64F64x2Lt) \
+ V(Loong64F64x2Le) \
+ V(Loong64F64x2Splat) \
+ V(Loong64F64x2ExtractLane) \
+ V(Loong64F64x2ReplaceLane) \
+ V(Loong64F64x2Pmin) \
+ V(Loong64F64x2Pmax) \
+ V(Loong64F64x2Ceil) \
+ V(Loong64F64x2Floor) \
+ V(Loong64F64x2Trunc) \
+ V(Loong64F64x2NearestInt) \
+ V(Loong64F64x2ConvertLowI32x4S) \
+ V(Loong64F64x2ConvertLowI32x4U) \
+ V(Loong64F64x2PromoteLowF32x4) \
+ V(Loong64I64x2Splat) \
+ V(Loong64I64x2ExtractLane) \
+ V(Loong64I64x2ReplaceLane) \
+ V(Loong64I64x2Add) \
+ V(Loong64I64x2Sub) \
+ V(Loong64I64x2Mul) \
+ V(Loong64I64x2Neg) \
+ V(Loong64I64x2Shl) \
+ V(Loong64I64x2ShrS) \
+ V(Loong64I64x2ShrU) \
+ V(Loong64I64x2BitMask) \
+ V(Loong64I64x2Eq) \
+ V(Loong64I64x2Ne) \
+ V(Loong64I64x2GtS) \
+ V(Loong64I64x2GeS) \
+ V(Loong64I64x2Abs) \
+ V(Loong64I64x2SConvertI32x4Low) \
+ V(Loong64I64x2SConvertI32x4High) \
+ V(Loong64I64x2UConvertI32x4Low) \
+ V(Loong64I64x2UConvertI32x4High) \
+ V(Loong64ExtMulLow) \
+ V(Loong64ExtMulHigh) \
+ V(Loong64ExtAddPairwise) \
+ V(Loong64F32x4Abs) \
+ V(Loong64F32x4Neg) \
+ V(Loong64F32x4Sqrt) \
+ V(Loong64F32x4RecipApprox) \
+ V(Loong64F32x4RecipSqrtApprox) \
+ V(Loong64F32x4Add) \
+ V(Loong64F32x4Sub) \
+ V(Loong64F32x4Mul) \
+ V(Loong64F32x4Div) \
+ V(Loong64F32x4Max) \
+ V(Loong64F32x4Min) \
+ V(Loong64F32x4Eq) \
+ V(Loong64F32x4Ne) \
+ V(Loong64F32x4Lt) \
+ V(Loong64F32x4Le) \
+ V(Loong64F32x4Pmin) \
+ V(Loong64F32x4Pmax) \
+ V(Loong64F32x4Ceil) \
+ V(Loong64F32x4Floor) \
+ V(Loong64F32x4Trunc) \
+ V(Loong64F32x4NearestInt) \
+ V(Loong64F32x4DemoteF64x2Zero) \
+ V(Loong64I32x4SConvertF32x4) \
+ V(Loong64I32x4UConvertF32x4) \
+ V(Loong64I32x4Neg) \
+ V(Loong64I32x4GtS) \
+ V(Loong64I32x4GeS) \
+ V(Loong64I32x4GtU) \
+ V(Loong64I32x4GeU) \
+ V(Loong64I32x4Abs) \
+ V(Loong64I32x4BitMask) \
+ V(Loong64I32x4DotI16x8S) \
+ V(Loong64I32x4TruncSatF64x2SZero) \
+ V(Loong64I32x4TruncSatF64x2UZero) \
+ V(Loong64I16x8Splat) \
+ V(Loong64I16x8ExtractLaneU) \
+ V(Loong64I16x8ExtractLaneS) \
+ V(Loong64I16x8ReplaceLane) \
+ V(Loong64I16x8Neg) \
+ V(Loong64I16x8Shl) \
+ V(Loong64I16x8ShrS) \
+ V(Loong64I16x8ShrU) \
+ V(Loong64I16x8Add) \
+ V(Loong64I16x8AddSatS) \
+ V(Loong64I16x8Sub) \
+ V(Loong64I16x8SubSatS) \
+ V(Loong64I16x8Mul) \
+ V(Loong64I16x8MaxS) \
+ V(Loong64I16x8MinS) \
+ V(Loong64I16x8Eq) \
+ V(Loong64I16x8Ne) \
+ V(Loong64I16x8GtS) \
+ V(Loong64I16x8GeS) \
+ V(Loong64I16x8AddSatU) \
+ V(Loong64I16x8SubSatU) \
+ V(Loong64I16x8MaxU) \
+ V(Loong64I16x8MinU) \
+ V(Loong64I16x8GtU) \
+ V(Loong64I16x8GeU) \
+ V(Loong64I16x8RoundingAverageU) \
+ V(Loong64I16x8Abs) \
+ V(Loong64I16x8BitMask) \
+ V(Loong64I16x8Q15MulRSatS) \
+ V(Loong64I8x16Splat) \
+ V(Loong64I8x16ExtractLaneU) \
+ V(Loong64I8x16ExtractLaneS) \
+ V(Loong64I8x16ReplaceLane) \
+ V(Loong64I8x16Neg) \
+ V(Loong64I8x16Shl) \
+ V(Loong64I8x16ShrS) \
+ V(Loong64I8x16Add) \
+ V(Loong64I8x16AddSatS) \
+ V(Loong64I8x16Sub) \
+ V(Loong64I8x16SubSatS) \
+ V(Loong64I8x16MaxS) \
+ V(Loong64I8x16MinS) \
+ V(Loong64I8x16Eq) \
+ V(Loong64I8x16Ne) \
+ V(Loong64I8x16GtS) \
+ V(Loong64I8x16GeS) \
+ V(Loong64I8x16ShrU) \
+ V(Loong64I8x16AddSatU) \
+ V(Loong64I8x16SubSatU) \
+ V(Loong64I8x16MaxU) \
+ V(Loong64I8x16MinU) \
+ V(Loong64I8x16GtU) \
+ V(Loong64I8x16GeU) \
+ V(Loong64I8x16RoundingAverageU) \
+ V(Loong64I8x16Abs) \
+ V(Loong64I8x16Popcnt) \
+ V(Loong64I8x16BitMask) \
+ V(Loong64S128And) \
+ V(Loong64S128Or) \
+ V(Loong64S128Xor) \
+ V(Loong64S128Not) \
+ V(Loong64S128Select) \
+ V(Loong64S128AndNot) \
+ V(Loong64I64x2AllTrue) \
+ V(Loong64I32x4AllTrue) \
+ V(Loong64I16x8AllTrue) \
+ V(Loong64I8x16AllTrue) \
+ V(Loong64V128AnyTrue) \
+ V(Loong64S32x4InterleaveRight) \
+ V(Loong64S32x4InterleaveLeft) \
+ V(Loong64S32x4PackEven) \
+ V(Loong64S32x4PackOdd) \
+ V(Loong64S32x4InterleaveEven) \
+ V(Loong64S32x4InterleaveOdd) \
+ V(Loong64S32x4Shuffle) \
+ V(Loong64S16x8InterleaveRight) \
+ V(Loong64S16x8InterleaveLeft) \
+ V(Loong64S16x8PackEven) \
+ V(Loong64S16x8PackOdd) \
+ V(Loong64S16x8InterleaveEven) \
+ V(Loong64S16x8InterleaveOdd) \
+ V(Loong64S16x4Reverse) \
+ V(Loong64S16x2Reverse) \
+ V(Loong64S8x16InterleaveRight) \
+ V(Loong64S8x16InterleaveLeft) \
+ V(Loong64S8x16PackEven) \
+ V(Loong64S8x16PackOdd) \
+ V(Loong64S8x16InterleaveEven) \
+ V(Loong64S8x16InterleaveOdd) \
+ V(Loong64I8x16Shuffle) \
+ V(Loong64I8x16Swizzle) \
+ V(Loong64S8x16Concat) \
+ V(Loong64S8x8Reverse) \
+ V(Loong64S8x4Reverse) \
+ V(Loong64S8x2Reverse) \
+ V(Loong64S128LoadSplat) \
+ V(Loong64S128Load8x8S) \
+ V(Loong64S128Load8x8U) \
+ V(Loong64S128Load16x4S) \
+ V(Loong64S128Load16x4U) \
+ V(Loong64S128Load32x2S) \
+ V(Loong64S128Load32x2U) \
+ V(Loong64S128Load32Zero) \
+ V(Loong64S128Load64Zero) \
+ V(Loong64LoadLane) \
+ V(Loong64StoreLane) \
+ V(Loong64I32x4SConvertI16x8Low) \
+ V(Loong64I32x4SConvertI16x8High) \
+ V(Loong64I32x4UConvertI16x8Low) \
+ V(Loong64I32x4UConvertI16x8High) \
+ V(Loong64I16x8SConvertI8x16Low) \
+ V(Loong64I16x8SConvertI8x16High) \
+ V(Loong64I16x8SConvertI32x4) \
+ V(Loong64I16x8UConvertI32x4) \
+ V(Loong64I16x8UConvertI8x16Low) \
+ V(Loong64I16x8UConvertI8x16High) \
+ V(Loong64I8x16SConvertI16x8) \
+ V(Loong64I8x16UConvertI16x8) \
+ V(Loong64StoreCompressTagged) \
+ V(Loong64Word64AtomicLoadUint32) \
+ V(Loong64Word64AtomicLoadUint64) \
+ V(Loong64Word64AtomicStoreWord64) \
+ V(Loong64Word64AtomicAddUint64) \
+ V(Loong64Word64AtomicSubUint64) \
+ V(Loong64Word64AtomicAndUint64) \
+ V(Loong64Word64AtomicOrUint64) \
+ V(Loong64Word64AtomicXorUint64) \
+ V(Loong64Word64AtomicExchangeUint64) \
+ V(Loong64Word64AtomicCompareExchangeUint64)
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */ \
+ V(Root) /* [%rr + K] */
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc
new file mode 100644
index 0000000000..3cfec9c403
--- /dev/null
+++ b/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/macro-assembler.h"
+#include "src/compiler/backend/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(LOONG_dev): LOONG64 Support instruction scheduler.
+bool InstructionScheduler::SchedulerSupported() { return false; }
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ UNREACHABLE();
+}
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ UNREACHABLE();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
new file mode 100644
index 0000000000..454bfa9986
--- /dev/null
+++ b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
@@ -0,0 +1,3124 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/base/platform/wrappers.h"
+#include "src/codegen/machine-type.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+// Adds loong64-specific methods for generating InstructionOperands.
+class Loong64OperandGenerator final : public OperandGenerator {
+ public:
+ explicit Loong64OperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
+ if (CanBeImmediate(node, opcode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ // Use the zero register if the node has the immediate value zero, otherwise
+ // assign a register.
+ InstructionOperand UseRegisterOrImmediateZero(Node* node) {
+ if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
+ (IsFloatConstant(node) &&
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool IsIntegerConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kInt32Constant) ||
+ (node->opcode() == IrOpcode::kInt64Constant);
+ }
+
+ int64_t GetIntegerConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ return OpParameter<int32_t>(node->op());
+ }
+ DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
+ return OpParameter<int64_t>(node->op());
+ }
+
+ bool IsFloatConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kFloat32Constant) ||
+ (node->opcode() == IrOpcode::kFloat64Constant);
+ }
+
+ double GetFloatConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kFloat32Constant) {
+ return OpParameter<float>(node->op());
+ }
+ DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+ return OpParameter<double>(node->op());
+ }
+
+ bool CanBeImmediate(Node* node, InstructionCode mode) {
+ return IsIntegerConstant(node) &&
+ CanBeImmediate(GetIntegerConstantValue(node), mode);
+ }
+
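+  // Immediate ranges accepted per opcode: 5-bit shift amounts for 32-bit
+  // shifts, 6-bit for 64-bit shifts, 12-bit unsigned immediates for logical
+  // operations and Tst, 16-bit signed offsets for loads and stores, and
+  // 12-bit signed immediates otherwise.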
+ bool CanBeImmediate(int64_t value, InstructionCode opcode) {
+ switch (ArchOpcodeField::decode(opcode)) {
+ case kLoong64Sll_w:
+ case kLoong64Srl_w:
+ case kLoong64Sra_w:
+ return is_uint5(value);
+ case kLoong64Sll_d:
+ case kLoong64Srl_d:
+ case kLoong64Sra_d:
+ return is_uint6(value);
+ case kLoong64And:
+ case kLoong64And32:
+ case kLoong64Or:
+ case kLoong64Or32:
+ case kLoong64Xor:
+ case kLoong64Xor32:
+ case kLoong64Tst:
+ return is_uint12(value);
+ case kLoong64Ld_b:
+ case kLoong64Ld_bu:
+ case kLoong64St_b:
+ case kLoong64Ld_h:
+ case kLoong64Ld_hu:
+ case kLoong64St_h:
+ case kLoong64Ld_w:
+ case kLoong64Ld_wu:
+ case kLoong64St_w:
+ case kLoong64Ld_d:
+ case kLoong64St_d:
+ case kLoong64Fld_s:
+ case kLoong64Fst_s:
+ case kLoong64Fld_d:
+ case kLoong64Fst_d:
+ return is_int16(value);
+ default:
+ return is_int12(value);
+ }
+ }
+
+ private:
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+ TRACE_UNIMPL();
+ return false;
+ }
+};
+
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node->op());
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
+}
+
+static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ if (g.IsIntegerConstant(node->InputAt(1))) {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ } else {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+ }
+}
+
+static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node->op());
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
+ g.UseRegister(node->InputAt(1)));
+}
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
+void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(
+ opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
+}
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseOperand(node->InputAt(1), opcode));
+}
+
+struct ExtendingLoadMatcher {
+ ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
+ : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
+ Initialize(node);
+ }
+
+ bool Matches() const { return matches_; }
+
+ Node* base() const {
+ DCHECK(Matches());
+ return base_;
+ }
+ int64_t immediate() const {
+ DCHECK(Matches());
+ return immediate_;
+ }
+ ArchOpcode opcode() const {
+ DCHECK(Matches());
+ return opcode_;
+ }
+
+ private:
+ bool matches_;
+ InstructionSelector* selector_;
+ Node* base_;
+ int64_t immediate_;
+ ArchOpcode opcode_;
+
+ void Initialize(Node* node) {
+ Int64BinopMatcher m(node);
+ // When loading a 64-bit value and shifting by 32, we should
+ // just load and sign-extend the interesting 4 bytes instead.
+ // This happens, for example, when we're loading and untagging SMIs.
+ DCHECK(m.IsWord64Sar());
+ if (m.left().IsLoad() && m.right().Is(32) &&
+ selector_->CanCover(m.node(), m.left().node())) {
+ DCHECK_EQ(selector_->GetEffectLevel(node),
+ selector_->GetEffectLevel(m.left().node()));
+ MachineRepresentation rep =
+ LoadRepresentationOf(m.left().node()->op()).representation();
+ DCHECK_EQ(3, ElementSizeLog2Of(rep));
+ if (rep != MachineRepresentation::kTaggedSigned &&
+ rep != MachineRepresentation::kTaggedPointer &&
+ rep != MachineRepresentation::kTagged &&
+ rep != MachineRepresentation::kWord64) {
+ return;
+ }
+
+ Loong64OperandGenerator g(selector_);
+ Node* load = m.left().node();
+ Node* offset = load->InputAt(1);
+ base_ = load->InputAt(0);
+ opcode_ = kLoong64Ld_w;
+ if (g.CanBeImmediate(offset, opcode_)) {
+ immediate_ = g.GetIntegerConstantValue(offset) + 4;
+ matches_ = g.CanBeImmediate(immediate_, kLoong64Ld_w);
+ }
+ }
+ }
+};
+
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
+ Node* output_node) {
+ ExtendingLoadMatcher m(node, selector);
+ Loong64OperandGenerator g(selector);
+ if (m.Matches()) {
+ InstructionOperand inputs[2];
+ inputs[0] = g.UseRegister(m.base());
+ InstructionCode opcode =
+ m.opcode() | AddressingModeField::encode(kMode_MRI);
+ DCHECK(is_int32(m.immediate()));
+ inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
+ InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
+ selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
+ inputs);
+ return true;
+ }
+ return false;
+}
+
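+// If |node| is a constant that fits the immediate field of |*opcode_return|,
+// encode the MRI addressing mode into the opcode and record the constant as
+// an immediate input.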
+bool TryMatchImmediate(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ size_t* input_count_return, InstructionOperand* inputs) {
+ Loong64OperandGenerator g(selector);
+ if (g.CanBeImmediate(node, *opcode_return)) {
+ *opcode_return |= AddressingModeField::encode(kMode_MRI);
+ inputs[0] = g.UseImmediate(node);
+ *input_count_return = 1;
+ return true;
+ }
+ return false;
+}
+
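+// Emits a binary operation, using an immediate operand for a constant input
+// when possible. For commutative operations a constant left operand can also
+// be used as the immediate by swapping the inputs and emitting
+// |reverse_opcode|.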
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand inputs[2];
+ size_t input_count = 0;
+ InstructionOperand outputs[1];
+ size_t output_count = 0;
+
+ if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
+ &inputs[1])) {
+ inputs[0] = g.UseRegister(m.left().node());
+ input_count++;
+ } else if (has_reverse_opcode &&
+ TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+ &input_count, &inputs[1])) {
+ inputs[0] = g.UseRegister(m.right().node());
+ opcode = reverse_opcode;
+ input_count++;
+ } else {
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+ }
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_EQ(1u, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ VisitBinop(selector, node, opcode, false, kArchNop, cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ VisitBinop(selector, node, opcode, false, kArchNop);
+}
+
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int alignment = rep.alignment();
+ int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
+
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+}
+
+void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
+ Node* output = nullptr) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ ExternalReferenceMatcher m(base);
+ if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
+ selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
+ ptrdiff_t const delta =
+ g.GetIntegerConstantValue(index) +
+ TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ selector->isolate(), m.ResolvedValue());
+ // Check that the delta is a 32-bit integer due to the limitations of
+ // immediate operands.
+ if (is_int32(delta)) {
+ opcode |= AddressingModeField::encode(kMode_Root);
+ selector->Emit(opcode,
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseImmediate(static_cast<int32_t>(delta)));
+ return;
+ }
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseRegister(base), g.UseRegister(index));
+ }
+}
+
+void InstructionSelector::VisitStoreLane(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitLoadLane(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitLoadTransform(Node* node) {
+ LoadTransformParameters params = LoadTransformParametersOf(node->op());
+
+ InstructionCode opcode = kArchNop;
+ switch (params.transformation) {
+ // TODO(LOONG_dev): LOONG64 S128 LoadSplat
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kLoong64S128Load8x8S;
+ break;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kLoong64S128Load8x8U;
+ break;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kLoong64S128Load16x4S;
+ break;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kLoong64S128Load16x4U;
+ break;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kLoong64S128Load32x2S;
+ break;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kLoong64S128Load32x2U;
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kLoong64S128Load32Zero;
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kLoong64S128Load64Zero;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ EmitLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+
+ InstructionCode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kLoong64Fld_s;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kLoong64Fld_d;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kLoong64Ld_w;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kLoong64Ld_d;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ case MachineRepresentation::kSimd128:
+ UNREACHABLE();
+ }
+
+ EmitLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitStore(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
+ // TODO(loong64): I guess this could be done in a better way.
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedPointer(rep));
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+    // OutOfLineRecordWrite uses the index in an arithmetic instruction, so the
+    // index must fit an arithmetic immediate (kLoong64Add_d), not just a
+    // load/store immediate.
+ if (g.CanBeImmediate(index, kLoong64Add_d)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MRR;
+ }
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs);
+ } else {
+ ArchOpcode opcode;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kLoong64Fst_s;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kLoong64Fst_d;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kLoong64St_b;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kLoong64St_h;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kLoong64St_w;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kLoong64St_d;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ case MachineRepresentation::kSimd128:
+ UNREACHABLE();
+ }
+
+ ExternalReferenceMatcher m(base);
+ if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
+ CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
+ ptrdiff_t const delta =
+ g.GetIntegerConstantValue(index) +
+ TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ isolate(), m.ResolvedValue());
+ // Check that the delta is a 32-bit integer due to the limitations of
+ // immediate operands.
+ if (is_int32(delta)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Root), g.NoOutput(),
+ g.UseImmediate(static_cast<int32_t>(delta)), g.UseImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ return;
+ }
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(index),
+ g.UseRegisterOrImmediateZero(value));
+ }
+ }
+}
+
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+
+ // Select Bstrpick_w for And(Shr(x, imm), mask) where the mask is in the
+ // least significant bits.
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ // Any shift value can match; int32 shifts use `value % 32`.
+ uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
+
+ // Bstrpick_w cannot extract bits past the register size, however since
+ // shifting the original value would have introduced some zeros we can
+ // still use Bstrpick_w with a smaller mask and the remaining bits will
+ // be zeros.
+ if (lsb + mask_width > 32) mask_width = 32 - lsb;
+
+ Emit(kLoong64Bstrpick_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
+ uint32_t shift = base::bits::CountPopulation(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros32(~mask);
+ if (shift != 0 && shift != 32 && msb + shift == 32) {
+ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+ // and remove constant loading of inverted mask.
+ Emit(kLoong64Bstrins_w, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
+ VisitBinop(this, node, kLoong64And32, true, kLoong64And32);
+}
+
+void InstructionSelector::VisitWord64And(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+
+ // Select Bstrpick_d for And(Shr(x, imm), mask) where the mask is in the
+ // least significant bits.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ // Any shift value can match; int64 shifts use `value % 64`.
+ uint32_t lsb =
+ static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);
+
+ // Bstrpick_d cannot extract bits past the register size, however since
+ // shifting the original value would have introduced some zeros we can
+ // still use Bstrpick_d with a smaller mask and the remaining bits will
+ // be zeros.
+ if (lsb + mask_width > 64) mask_width = 64 - lsb;
+
+ if (lsb == 0 && mask_width == 64) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
+ } else {
+ Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(static_cast<int32_t>(mask_width)));
+ }
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
+ uint32_t shift = base::bits::CountPopulation(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros64(~mask);
+ if (shift != 0 && shift < 32 && msb + shift == 64) {
+ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+      // and remove constant loading of inverted mask. Bstrins_d cannot insert
+      // bits past word size, so shifts smaller than 32 are covered.
+ Emit(kLoong64Bstrins_d, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
+ VisitBinop(this, node, kLoong64And, true, kLoong64And);
+}
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kLoong64Or32, true, kLoong64Or32);
+}
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+ VisitBinop(this, node, kLoong64Or, true, kLoong64Or);
+}
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ Int32BinopMatcher m(node);
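+  // Fold Xor(Or(a, b), -1) into a single Nor32 when the Or has two register
+  // inputs.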
+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasResolvedValue()) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor32, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0));
+ return;
+ }
+ VisitBinop(this, node, kLoong64Xor32, true, kLoong64Xor32);
+}
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasResolvedValue()) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
+ VisitBinop(this, node, kLoong64Xor, true, kLoong64Xor);
+}
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+ // Match Word32Shl(Word32And(x, mask), imm) to Sll_w where the mask is
+ // contiguous, and the shift immediate non-zero.
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t mask = mleft.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().ResolvedValue();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Sll_w, node);
+}
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x1F;
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
+ // Select Bstrpick_w for Shr(And(x, mask), imm) where the result of the
+ // mask is shifted into the least-significant bits.
+ uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_msb + mask_width + lsb) == 32) {
+ Loong64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+ Emit(kLoong64Bstrpick_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Srl_w, node);
+}
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
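+    // Sar(Shl(x, K), K) sign-extends the low bits of x; select a single
+    // sign-extension instruction instead: Ext_w_h for K == 16, Ext_w_b for
+    // K == 24, and Sll_w with shift 0 for K == 32 (32-bit ops sign-extend
+    // their result).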
+ if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
+ Loong64OperandGenerator g(this);
+ uint32_t sar = m.right().ResolvedValue();
+ uint32_t shl = mleft.right().ResolvedValue();
+ if ((sar == shl) && (sar == 16)) {
+ Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 24)) {
+ Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 32)) {
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Sra_w, node);
+}
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+ m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
+ // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+ // 32 bits anyway.
+ Emit(kLoong64Sll_d, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 63)) {
+ // Match Word64Shl(Word64And(x, mask), imm) to Sll_d where the mask is
+ // contiguous, and the shift immediate non-zero.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ uint64_t mask = mleft.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ uint64_t shift = m.right().ResolvedValue();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+ DCHECK_NE(0u, shift);
+
+ if ((shift + mask_width) >= 64) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kLoong64Sll_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Sll_d, node);
+}
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x3F;
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
+      // Select Bstrpick_d for Shr(And(x, mask), imm) where the bits selected
+      // by the mask are shifted into the least-significant bits.
+ uint64_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_msb + mask_width + lsb) == 64) {
+ Loong64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
+ Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Srl_d, node);
+}
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ if (TryEmitExtendingLoad(this, node, node)) return;
+ VisitRRO(this, kLoong64Sra_d, node);
+}
+
+void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitRRO(this, kLoong64Rotr_w, node);
+}
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+ VisitRRO(this, kLoong64Rotr_d, node);
+}
+
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64ByteSwap32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64ByteSwap64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ VisitRR(this, kLoong64Clz_w, node);
+}
+
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ VisitRR(this, kLoong64Clz_d, node);
+}
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+
+ // Select Alsl_w for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mright.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ // Select Alsl_w for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ VisitBinop(this, node, kLoong64Add_w, true, kLoong64Add_w);
+}
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+
+ // Select Alsl_d for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord64Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int64BinopMatcher mright(m.right().node());
+ if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mright.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ // Select Alsl_d for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord64Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ VisitBinop(this, node, kLoong64Add_d, true, kLoong64Add_d);
+}
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ VisitBinop(this, node, kLoong64Sub_w);
+}
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+ VisitBinop(this, node, kLoong64Sub_d);
+}
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
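+    // Strength-reduce multiplication by a constant: a power of two becomes
+    // a shift, 2^k + 1 becomes Alsl_w (x + (x << k)), and 2^k - 1 becomes a
+    // shift followed by a subtract.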
+ if (base::bits::IsPowerOfTwo(value)) {
+ Emit(kLoong64Sll_w | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value - 1) && value - 1 > 0 &&
+ value - 1 <= 31) {
+ Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value + 1)) {
+ InstructionOperand temp = g.TempRegister();
+ Emit(kLoong64Sll_w | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
+ Emit(kLoong64Sub_w | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher leftInput(left), rightInput(right);
+ if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
+ // Combine untagging shifts with Mulh_d.
+ Emit(kLoong64Mulh_d, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ VisitRRR(this, kLoong64Mul_w, node);
+}
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+ VisitRRR(this, kLoong64Mulh_w, node);
+}
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ VisitRRR(this, kLoong64Mulh_wu, node);
+}
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
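+    // Same strength reduction as in VisitInt32Mul, using the 64-bit
+    // instructions.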
+ if (base::bits::IsPowerOfTwo(value)) {
+ Emit(kLoong64Sll_d | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value - 1) && value - 1 > 0 &&
+ value - 1 <= 31) {
+      // The Alsl_d macro handles shift amounts that are out of range.
+ Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value + 1)) {
+ InstructionOperand temp = g.TempRegister();
+ Emit(kLoong64Sll_d | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
+ Emit(kLoong64Sub_d | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Emit(kLoong64Mul_d, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Div_d.
+ Emit(kLoong64Div_d, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kLoong64Div_w, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kLoong64Div_wu, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Mod_d.
+ Emit(kLoong64Mod_d, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kLoong64Mod_w, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kLoong64Mod_wu, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Div_d, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint64Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Div_du, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Mod_d, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint64Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Mod_du, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Float32ToFloat64, node);
+}
+
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Int32ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Uint32ToFloat32, node);
+}
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Int32ToFloat64, node);
+}
+
+void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Int64ToFloat64, node);
+}
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Uint32ToFloat64, node);
+}
+
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionCode opcode = kLoong64Float32ToInt32;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionCode opcode = kLoong64Float32ToUint32;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+  // TODO(LOONG_dev): LOONG64 Match ChangeFloat64ToInt32(Float64Round##OP) to
+  // the corresponding instruction which does rounding and conversion to
+  // integer format.
+ if (CanCover(node, value)) {
+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
+ Node* next = value->InputAt(0);
+ if (!CanCover(value, next)) {
+ // Match float32 -> float64 -> int32 representation change path.
+ Emit(kLoong64Float32ToInt32, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ }
+ }
+ VisitRR(this, kLoong64Float64ToInt32, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
+ VisitRR(this, kLoong64Float64ToInt64, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ VisitRR(this, kLoong64Float64ToUint32, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
+ VisitRR(this, kLoong64Float64ToUint64, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ VisitRR(this, kLoong64Float64ToUint32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionCode opcode = kLoong64Float64ToInt64;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
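+  // If projection 1 of this node is used, define a second output that
+  // receives a flag indicating whether the truncation succeeded.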
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+  Emit(kLoong64Float32ToInt64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kLoong64Float64ToInt64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kLoong64Float32ToUint64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ Loong64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kLoong64Float64ToUint64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+#ifdef USE_SIMULATOR
+ Node* value = node->InputAt(0);
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kLoadImmutable) &&
+ CanCover(node, value)) {
+ // Generate sign-extending load.
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ InstructionCode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kLoong64Ld_w;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ EmitLoad(this, value, opcode, node);
+ } else {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.TempImmediate(0));
+ }
+#else
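+  // On hardware, 32-bit operations keep their results sign-extended in the
+  // 64-bit register, so the conversion is a no-op.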
+ EmitIdentity(node);
+#endif
+}
+
+bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
+ DCHECK_NE(node->opcode(), IrOpcode::kPhi);
+ switch (node->opcode()) {
+ // Comparisons only emit 0/1, so the upper 32 bits must be zero.
+ case IrOpcode::kWord32Equal:
+ case IrOpcode::kInt32LessThan:
+ case IrOpcode::kInt32LessThanOrEqual:
+ case IrOpcode::kUint32LessThan:
+ case IrOpcode::kUint32LessThanOrEqual:
+ return true;
+ case IrOpcode::kWord32And: {
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
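+        // A mask that fits in 31 bits clears bit 31, so the sign-extended
+        // 32-bit result already has its upper 32 bits zero.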
+ return is_uint31(mask);
+ }
+ return false;
+ }
+ case IrOpcode::kWord32Shr: {
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ uint8_t sa = m.right().ResolvedValue() & 0x1f;
+ return sa > 0;
+ }
+ return false;
+ }
+ case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable: {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ if (load_rep.IsUnsigned()) {
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8: // Fall through.
+ case MachineRepresentation::kWord16:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+
+ if (value->opcode() == IrOpcode::kLoad) {
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ if (load_rep.IsUnsigned() &&
+ load_rep.representation() == MachineRepresentation::kWord32) {
+ EmitLoad(this, value, kLoong64Ld_wu, node);
+ return;
+ }
+ }
+ if (ZeroExtendsWord32ToWord64(value)) {
+ EmitIdentity(node);
+ return;
+ }
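+  // Otherwise zero-extend explicitly by extracting bits [31:0].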
+ Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.TempImmediate(0),
+ g.TempImmediate(32));
+}
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord64Sar: {
+ if (CanCoverTransitively(node, value, value->InputAt(0)) &&
+ TryEmitExtendingLoad(this, value, node)) {
+ return;
+ } else {
+ Int64BinopMatcher m(value);
+ if (m.right().IsInRange(32, 63)) {
+            // After smi untagging no truncation is needed; combine the
+            // sequence.
+ Emit(kLoong64Sra_d, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
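+  // Otherwise truncate by re-sign-extending the low 32 bits (Sll_w with a
+  // zero shift amount).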
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to the
+  // corresponding instruction.
+ if (CanCover(node, value) &&
+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
+ Emit(kLoong64Int32ToFloat32, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ VisitRR(this, kLoong64Float64ToFloat32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
+}
+
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kLoong64Float64ToInt32, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Int64ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Int64ToFloat64, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Uint64ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Uint64ToFloat64, node);
+}
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kLoong64Float64ExtractLowWord32, node);
+}
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kLoong64BitcastDL, node);
+}
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64InsertLowWord32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64BitcastLD, node);
+}
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitRRR(this, kLoong64Float32Add, node);
+}
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ VisitRRR(this, kLoong64Float64Add, node);
+}
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ VisitRRR(this, kLoong64Float32Sub, node);
+}
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ VisitRRR(this, kLoong64Float64Sub, node);
+}
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRRR(this, kLoong64Float32Mul, node);
+}
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ VisitRRR(this, kLoong64Float64Mul, node);
+}
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRRR(this, kLoong64Float32Div, node);
+}
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRR(this, kLoong64Float64Div, node);
+}
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ Loong64OperandGenerator g(this);
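+  // Float64Mod is lowered to a call, so inputs and the result are placed in
+  // fixed FP argument registers.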
+ Emit(kLoong64Float64Mod, g.DefineAsFixed(node, f0),
+ g.UseFixed(node->InputAt(0), f0), g.UseFixed(node->InputAt(1), f1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float32Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float32Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kLoong64Float32Abs, node);
+}
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kLoong64Float64Abs, node);
+}
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRR(this, kLoong64Float32Sqrt, node);
+}
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRR(this, kLoong64Float64Sqrt, node);
+}
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kLoong64Float32RoundDown, node);
+}
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kLoong64Float64RoundDown, node);
+}
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kLoong64Float32RoundUp, node);
+}
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kLoong64Float64RoundUp, node);
+}
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kLoong64Float32RoundTruncate, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ VisitRR(this, kLoong64Float64RoundTruncate, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kLoong64Float32RoundTiesEven, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kLoong64Float64RoundTiesEven, node);
+}
+
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kLoong64Float32Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kLoong64Float64Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ Loong64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0),
+ g.UseFixed(node->InputAt(1), f1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ Loong64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
+ Node* node) {
+ Loong64OperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = 0;
+ for (PushParameter input : (*arguments)) {
+ Emit(kLoong64Poke, g.NoOutput(), g.UseRegister(input.node),
+ g.TempImmediate(slot << kSystemPointerSizeLog2));
+ ++slot;
+ }
+ } else {
+ int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
+ if (push_count > 0) {
+      // Calculate the needed stack space.
+ int stack_size = 0;
+ for (PushParameter input : (*arguments)) {
+ if (input.node) {
+ stack_size += input.location.GetSizeInPointers();
+ }
+ }
+ Emit(kLoong64StackClaim, g.NoOutput(),
+ g.TempImmediate(stack_size << kSystemPointerSizeLog2));
+ }
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node) {
+ Emit(kLoong64Poke, g.NoOutput(), g.UseRegister(input.node),
+ g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
+ }
+ }
+ }
+}
+
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
+ Loong64OperandGenerator g(this);
+
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!call_descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ } else if (output.location.GetType() == MachineType::Simd128()) {
+ abort();
+ }
+ int offset = call_descriptor->GetOffsetToReturns();
+ int reverse_slot = -output.location.GetLocation() - offset;
+ Emit(kLoong64Peek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+ }
+}
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
+
+namespace {
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
+ selector->EmitWithContinuation(opcode, left, right, cont);
+}
+
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ Float32BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kLoong64Float32Cmp, lhs, rhs, cont);
+}
+
+// Shared routine for multiple float64 compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ Float64BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kLoong64Float64Cmp, lhs, rhs, cont);
+}
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative) {
+ Loong64OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right, opcode)) {
+ if (opcode == kLoong64Tst) {
+ if (left->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+ VisitCompare(selector, opcode, g.UseRegister(left->InputAt(0)),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ }
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ }
+ } else if (g.CanBeImmediate(left, opcode)) {
+ if (!commutative) cont->Commute();
+ if (opcode == kLoong64Tst) {
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ }
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+ cont);
+ }
+}
+
+void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode,
+ FlagsContinuation* cont) {
+ // TODO(LOONG_dev): LOONG64 Add check for debug mode
+ VisitWordCompare(selector, node, opcode, cont, false);
+}
+
+#ifdef USE_SIMULATOR
+// Shared routine for multiple word compare operations.
+void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ InstructionOperand leftOp = g.TempRegister();
+ InstructionOperand rightOp = g.TempRegister();
+
+ selector->Emit(kLoong64Sll_d, leftOp, g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(32));
+ selector->Emit(kLoong64Sll_d, rightOp, g.UseRegister(node->InputAt(1)),
+ g.TempImmediate(32));
+
+ VisitCompare(selector, opcode, leftOp, rightOp, cont);
+}
+#endif
+
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+  // LOONG64 doesn't support Word32 compare instructions. Instead it relies
+  // on the values in registers being correctly sign-extended and uses
+  // Word64 comparison instead.
+#ifdef USE_SIMULATOR
+  // When calling a host function in the simulator, if the function returns
+  // an int32 value, the simulator does not sign-extend it to int64 because
+  // it cannot know whether the function returns an int32 or an int64. So we
+  // need to do a full word32 compare in this case.
+ if (node->InputAt(0)->opcode() == IrOpcode::kCall ||
+ node->InputAt(1)->opcode() == IrOpcode::kCall) {
+ VisitFullWord32Compare(selector, node, kLoong64Cmp, cont);
+ return;
+ }
+#endif
+ VisitOptimizedWord32Compare(selector, node, kLoong64Cmp, cont);
+}
+
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(selector, node, kLoong64Cmp, cont, false);
+}
+
+void EmitWordCompareZero(InstructionSelector* selector, Node* value,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ selector->EmitWithContinuation(kLoong64Cmp, g.UseRegister(value),
+ g.TempImmediate(0), cont);
+}
+
+void VisitAtomicLoad(InstructionSelector* selector, Node* node,
+ AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ // The memory order is ignored.
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ InstructionCode code;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = (width == AtomicWidth::kWord32) ? kAtomicLoadWord32
+ : kLoong64Word64AtomicLoadUint32;
+ break;
+ case MachineRepresentation::kWord64:
+ code = kLoong64Word64AtomicLoadUint64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(kTaggedSize, 8);
+ code = kLoong64Word64AtomicLoadUint64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kLoong64Add_d | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void VisitAtomicStore(InstructionSelector* selector, Node* node,
+ AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ // The memory order is ignored.
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
+ MachineRepresentation rep = store_params.representation();
+
+ if (FLAG_enable_unconditional_write_barriers &&
+ CanBeTaggedOrCompressedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
+ InstructionCode code;
+
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedPointer(rep));
+ DCHECK_EQ(kTaggedSize, 8);
+
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ } else {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ code = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ code = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ DCHECK_EQ(width, AtomicWidth::kWord64);
+ code = kLoong64Word64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(kTaggedSize, 8);
+ code = kLoong64StoreCompressTagged;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kLoong64Add_d | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), addr_reg, g.TempImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ }
+}
+
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
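+  // Keep base, index, value, and result in distinct registers so they do
+  // not clash with the temporaries used when expanding the exchange.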
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(old_value);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temps[4];
+ temps[0] = g.TempRegister();
+ temps[1] = g.TempRegister();
+ temps[2] = g.TempRegister();
+ temps[3] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
+}
+
+} // namespace
+
+void InstructionSelector::VisitStackPointerGreaterThan(
+ Node* node, FlagsContinuation* cont) {
+ StackCheckKind kind = StackCheckKindOf(node->op());
+ InstructionCode opcode =
+ kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
+
+ Loong64OperandGenerator g(this);
+
+ // No outputs.
+ InstructionOperand* const outputs = nullptr;
+ const int output_count = 0;
+
+ // TempRegister(0) is used to store the comparison result.
+ // Applying an offset to this stack check requires a temp register. Offsets
+ // are only applied to the first stack check. If applying an offset, we must
+ // ensure the input and temp registers do not alias, thus kUniqueRegister.
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 2 : 1);
+ const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
+ ? OperandGenerator::kUniqueRegister
+ : OperandGenerator::kRegister;
+
+ Node* const value = node->InputAt(0);
+ InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
+ static constexpr int input_count = arraysize(inputs);
+
+ EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
+ temp_count, temps, cont);
+}
+
+// Shared routine for word comparisons against zero.
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (CanCover(user, value)) {
+ if (value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
+ } else if (value->opcode() == IrOpcode::kWord64Equal) {
+ Int64BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
+ } else {
+ break;
+ }
+
+ cont->Negate();
+ }
+
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kInt32LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kUint32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kWord64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kInt64LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kUint64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kFloat64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (ProjectionIndexOf(value->op()) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either nullptr, which means there's no use of
+          // the actual value, or was already defined, which means it is
+          // scheduled *AFTER* this branch.
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64Add_d, cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64Sub_d, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64MulOvf_w, cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64AddOvf_d, cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64SubOvf_d, cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kWord32And:
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(this, value, kLoong64Tst, cont, true);
+ case IrOpcode::kStackPointerGreaterThan:
+ cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+ return VisitStackPointerGreaterThan(value, cont);
+ default:
+ break;
+ }
+ }
+
+  // Continuation could not be combined with a compare; emit compare against 0.
+ EmitWordCompareZero(this, value, cont);
+}
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 10 + 2 * sw.value_range();
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
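+    // Use a jump table if its weighted space/time cost estimate does not
+    // exceed that of a binary search, the minimum value can be subtracted
+    // safely, and the value range is small enough.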
+ if (sw.case_count() > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value()) {
+ index_operand = g.TempRegister();
+ Emit(kLoong64Sub_w, index_operand, value_operand,
+ g.TempImmediate(sw.min_value()));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+ }
+
+ // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(sw, value_operand);
+}
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
+ }
+
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64Add_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64Add_d, &cont);
+}
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64Sub_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64Sub_d, &cont);
+}
+
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64MulOvf_w, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64MulOvf_w, &cont);
+}
+
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64AddOvf_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64AddOvf_d, &cont);
+}
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64SubOvf_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64SubOvf_d, &cont);
+}
+
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
+ }
+
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ VisitRR(this, kLoong64Float64ExtractLowWord32, node);
+}
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ VisitRR(this, kLoong64Float64ExtractHighWord32, node);
+}
+
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kLoong64Float64SilenceNaN, node);
+}
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kLoong64Float64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kLoong64Float64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Dbar, g.NoOutput());
+}
+
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
+ VisitAtomicLoad(this, node, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
+ VisitAtomicStore(this, node, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ VisitAtomicLoad(this, node, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord64AtomicStore(Node* node) {
+ VisitAtomicStore(this, node, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kLoong64Word64AtomicExchangeUint64;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicCompareExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicCompareExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicCompareExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicCompareExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicCompareExchangeWord32;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kAtomicCompareExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicCompareExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kAtomicCompareExchangeWord32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kLoong64Word64AtomicCompareExchangeUint64;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord32AtomicBinaryOperation(
+ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
+ ArchOpcode uint16_op, ArchOpcode word32_op) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = int8_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Int16()) {
+ opcode = int16_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = word32_op;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitWord64AtomicBinaryOperation(
+ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
+ ArchOpcode uint64_op) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint64()) {
+ opcode = uint64_op;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kLoong64Word64Atomic##op##Uint64); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+#define SIMD_TYPE_LIST(V) \
+ V(F64x2) \
+ V(F32x4) \
+ V(I64x2) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs, kLoong64F64x2Abs) \
+ V(F64x2Neg, kLoong64F64x2Neg) \
+ V(F64x2Sqrt, kLoong64F64x2Sqrt) \
+ V(F64x2Ceil, kLoong64F64x2Ceil) \
+ V(F64x2Floor, kLoong64F64x2Floor) \
+ V(F64x2Trunc, kLoong64F64x2Trunc) \
+ V(F64x2NearestInt, kLoong64F64x2NearestInt) \
+ V(I64x2Neg, kLoong64I64x2Neg) \
+ V(I64x2BitMask, kLoong64I64x2BitMask) \
+ V(F64x2ConvertLowI32x4S, kLoong64F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U, kLoong64F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4, kLoong64F64x2PromoteLowF32x4) \
+ V(F32x4SConvertI32x4, kLoong64F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kLoong64F32x4UConvertI32x4) \
+ V(F32x4Abs, kLoong64F32x4Abs) \
+ V(F32x4Neg, kLoong64F32x4Neg) \
+ V(F32x4Sqrt, kLoong64F32x4Sqrt) \
+ V(F32x4RecipApprox, kLoong64F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kLoong64F32x4RecipSqrtApprox) \
+ V(F32x4Ceil, kLoong64F32x4Ceil) \
+ V(F32x4Floor, kLoong64F32x4Floor) \
+ V(F32x4Trunc, kLoong64F32x4Trunc) \
+ V(F32x4NearestInt, kLoong64F32x4NearestInt) \
+ V(F32x4DemoteF64x2Zero, kLoong64F32x4DemoteF64x2Zero) \
+ V(I64x2Abs, kLoong64I64x2Abs) \
+ V(I64x2SConvertI32x4Low, kLoong64I64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High, kLoong64I64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low, kLoong64I64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High, kLoong64I64x2UConvertI32x4High) \
+ V(I32x4SConvertF32x4, kLoong64I32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kLoong64I32x4UConvertF32x4) \
+ V(I32x4Neg, kLoong64I32x4Neg) \
+ V(I32x4SConvertI16x8Low, kLoong64I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kLoong64I32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kLoong64I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kLoong64I32x4UConvertI16x8High) \
+ V(I32x4Abs, kLoong64I32x4Abs) \
+ V(I32x4BitMask, kLoong64I32x4BitMask) \
+ V(I32x4TruncSatF64x2SZero, kLoong64I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero, kLoong64I32x4TruncSatF64x2UZero) \
+ V(I16x8Neg, kLoong64I16x8Neg) \
+ V(I16x8SConvertI8x16Low, kLoong64I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kLoong64I16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kLoong64I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kLoong64I16x8UConvertI8x16High) \
+ V(I16x8Abs, kLoong64I16x8Abs) \
+ V(I16x8BitMask, kLoong64I16x8BitMask) \
+ V(I8x16Neg, kLoong64I8x16Neg) \
+ V(I8x16Abs, kLoong64I8x16Abs) \
+ V(I8x16Popcnt, kLoong64I8x16Popcnt) \
+ V(I8x16BitMask, kLoong64I8x16BitMask) \
+ V(S128Not, kLoong64S128Not) \
+ V(I64x2AllTrue, kLoong64I64x2AllTrue) \
+ V(I32x4AllTrue, kLoong64I32x4AllTrue) \
+ V(I16x8AllTrue, kLoong64I16x8AllTrue) \
+ V(I8x16AllTrue, kLoong64I8x16AllTrue) \
+ V(V128AnyTrue, kLoong64V128AnyTrue)
+
+#define SIMD_SHIFT_OP_LIST(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, kLoong64F64x2Add) \
+ V(F64x2Sub, kLoong64F64x2Sub) \
+ V(F64x2Mul, kLoong64F64x2Mul) \
+ V(F64x2Div, kLoong64F64x2Div) \
+ V(F64x2Min, kLoong64F64x2Min) \
+ V(F64x2Max, kLoong64F64x2Max) \
+ V(F64x2Eq, kLoong64F64x2Eq) \
+ V(F64x2Ne, kLoong64F64x2Ne) \
+ V(F64x2Lt, kLoong64F64x2Lt) \
+ V(F64x2Le, kLoong64F64x2Le) \
+ V(I64x2Eq, kLoong64I64x2Eq) \
+ V(I64x2Ne, kLoong64I64x2Ne) \
+ V(I64x2Add, kLoong64I64x2Add) \
+ V(I64x2Sub, kLoong64I64x2Sub) \
+ V(I64x2Mul, kLoong64I64x2Mul) \
+ V(I64x2GtS, kLoong64I64x2GtS) \
+ V(I64x2GeS, kLoong64I64x2GeS) \
+ V(F32x4Add, kLoong64F32x4Add) \
+ V(F32x4Sub, kLoong64F32x4Sub) \
+ V(F32x4Mul, kLoong64F32x4Mul) \
+ V(F32x4Div, kLoong64F32x4Div) \
+ V(F32x4Max, kLoong64F32x4Max) \
+ V(F32x4Min, kLoong64F32x4Min) \
+ V(F32x4Eq, kLoong64F32x4Eq) \
+ V(F32x4Ne, kLoong64F32x4Ne) \
+ V(F32x4Lt, kLoong64F32x4Lt) \
+ V(F32x4Le, kLoong64F32x4Le) \
+ V(I32x4Add, kLoong64I32x4Add) \
+ V(I32x4Sub, kLoong64I32x4Sub) \
+ V(I32x4Mul, kLoong64I32x4Mul) \
+ V(I32x4MaxS, kLoong64I32x4MaxS) \
+ V(I32x4MinS, kLoong64I32x4MinS) \
+ V(I32x4MaxU, kLoong64I32x4MaxU) \
+ V(I32x4MinU, kLoong64I32x4MinU) \
+ V(I32x4Eq, kLoong64I32x4Eq) \
+ V(I32x4Ne, kLoong64I32x4Ne) \
+ V(I32x4GtS, kLoong64I32x4GtS) \
+ V(I32x4GeS, kLoong64I32x4GeS) \
+ V(I32x4GtU, kLoong64I32x4GtU) \
+ V(I32x4GeU, kLoong64I32x4GeU) \
+ V(I32x4DotI16x8S, kLoong64I32x4DotI16x8S) \
+ V(I16x8Add, kLoong64I16x8Add) \
+ V(I16x8AddSatS, kLoong64I16x8AddSatS) \
+ V(I16x8AddSatU, kLoong64I16x8AddSatU) \
+ V(I16x8Sub, kLoong64I16x8Sub) \
+ V(I16x8SubSatS, kLoong64I16x8SubSatS) \
+ V(I16x8SubSatU, kLoong64I16x8SubSatU) \
+ V(I16x8Mul, kLoong64I16x8Mul) \
+ V(I16x8MaxS, kLoong64I16x8MaxS) \
+ V(I16x8MinS, kLoong64I16x8MinS) \
+ V(I16x8MaxU, kLoong64I16x8MaxU) \
+ V(I16x8MinU, kLoong64I16x8MinU) \
+ V(I16x8Eq, kLoong64I16x8Eq) \
+ V(I16x8Ne, kLoong64I16x8Ne) \
+ V(I16x8GtS, kLoong64I16x8GtS) \
+ V(I16x8GeS, kLoong64I16x8GeS) \
+ V(I16x8GtU, kLoong64I16x8GtU) \
+ V(I16x8GeU, kLoong64I16x8GeU) \
+ V(I16x8RoundingAverageU, kLoong64I16x8RoundingAverageU) \
+ V(I16x8SConvertI32x4, kLoong64I16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4, kLoong64I16x8UConvertI32x4) \
+ V(I16x8Q15MulRSatS, kLoong64I16x8Q15MulRSatS) \
+ V(I8x16Add, kLoong64I8x16Add) \
+ V(I8x16AddSatS, kLoong64I8x16AddSatS) \
+ V(I8x16AddSatU, kLoong64I8x16AddSatU) \
+ V(I8x16Sub, kLoong64I8x16Sub) \
+ V(I8x16SubSatS, kLoong64I8x16SubSatS) \
+ V(I8x16SubSatU, kLoong64I8x16SubSatU) \
+ V(I8x16MaxS, kLoong64I8x16MaxS) \
+ V(I8x16MinS, kLoong64I8x16MinS) \
+ V(I8x16MaxU, kLoong64I8x16MaxU) \
+ V(I8x16MinU, kLoong64I8x16MinU) \
+ V(I8x16Eq, kLoong64I8x16Eq) \
+ V(I8x16Ne, kLoong64I8x16Ne) \
+ V(I8x16GtS, kLoong64I8x16GtS) \
+ V(I8x16GeS, kLoong64I8x16GeS) \
+ V(I8x16GtU, kLoong64I8x16GtU) \
+ V(I8x16GeU, kLoong64I8x16GeU) \
+ V(I8x16RoundingAverageU, kLoong64I8x16RoundingAverageU) \
+ V(I8x16SConvertI16x8, kLoong64I8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8, kLoong64I8x16UConvertI16x8) \
+ V(S128And, kLoong64S128And) \
+ V(S128Or, kLoong64S128Or) \
+ V(S128Xor, kLoong64S128Xor) \
+ V(S128AndNot, kLoong64S128AndNot)
+
+void InstructionSelector::VisitS128Const(Node* node) {
+ Loong64OperandGenerator g(this);
+ static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t);
+ uint32_t val[kUint32Immediates];
+ memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size);
+ // If all bytes are zeros or ones, avoid emitting code for generic constants
+ bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
+ bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
+ val[2] == UINT32_MAX && val[3] == UINT32_MAX;
+ InstructionOperand dst = g.DefineAsRegister(node);
+ if (all_zeros) {
+ Emit(kLoong64S128Zero, dst);
+ } else if (all_ones) {
+ Emit(kLoong64S128AllOnes, dst);
+ } else {
+ Emit(kLoong64S128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
+ g.UseImmediate(val[2]), g.UseImmediate(val[3]));
+ }
+}
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64S128Zero, g.DefineAsRegister(node));
+}
+
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRR(this, kLoong64##Type##Splat, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+#undef SIMD_VISIT_SPLAT
+
+#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
+ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
+ VisitRRI(this, kLoong64##Type##ExtractLane##Sign, node); \
+ }
+SIMD_VISIT_EXTRACT_LANE(F64x2, )
+SIMD_VISIT_EXTRACT_LANE(F32x4, )
+SIMD_VISIT_EXTRACT_LANE(I64x2, )
+SIMD_VISIT_EXTRACT_LANE(I32x4, )
+SIMD_VISIT_EXTRACT_LANE(I16x8, U)
+SIMD_VISIT_EXTRACT_LANE(I16x8, S)
+SIMD_VISIT_EXTRACT_LANE(I8x16, U)
+SIMD_VISIT_EXTRACT_LANE(I8x16, S)
+#undef SIMD_VISIT_EXTRACT_LANE
+
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kLoong64##Type##ReplaceLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
+
+#define SIMD_VISIT_UNOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction, node); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
+
+#define SIMD_VISIT_SHIFT_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitSimdShift(this, kLoong64##Name, node); \
+ }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
+
+#define SIMD_VISIT_BINOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction, node); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+
+void InstructionSelector::VisitS128Select(Node* node) {
+ VisitRRRR(this, kLoong64S128Select, node);
+}
+
+#if V8_ENABLE_WEBASSEMBLY
+namespace {
+
+struct ShuffleEntry {
+ uint8_t shuffle[kSimd128Size];
+ ArchOpcode opcode;
+};
+
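+// Shuffle patterns, in canonical byte-lane form, that map onto a single
+// LoongArch SIMD instruction.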
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kLoong64S32x4InterleaveRight},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kLoong64S32x4InterleaveLeft},
+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+ kLoong64S32x4PackEven},
+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+ kLoong64S32x4PackOdd},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+ kLoong64S32x4InterleaveEven},
+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+ kLoong64S32x4InterleaveOdd},
+
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kLoong64S16x8InterleaveRight},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kLoong64S16x8InterleaveLeft},
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kLoong64S16x8PackEven},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kLoong64S16x8PackOdd},
+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+ kLoong64S16x8InterleaveEven},
+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+ kLoong64S16x8InterleaveOdd},
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+ kLoong64S16x4Reverse},
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
+ kLoong64S16x2Reverse},
+
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kLoong64S8x16InterleaveRight},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kLoong64S8x16InterleaveLeft},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kLoong64S8x16PackEven},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kLoong64S8x16PackOdd},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kLoong64S8x16InterleaveEven},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kLoong64S8x16InterleaveOdd},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
+ kLoong64S8x8Reverse},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
+ kLoong64S8x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+ kLoong64S8x2Reverse}};
+
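+// Returns true and sets *opcode if |shuffle| matches one of the patterns in
+// |table|. For swizzles (single-input shuffles) the lane indices are masked
+// down to 0..15 so that entries written for the two-input form still match.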
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, bool is_swizzle,
+ ArchOpcode* opcode) {
+ uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
+ for (size_t i = 0; i < num_entries; ++i) {
+ const ShuffleEntry& entry = table[i];
+ int j = 0;
+ for (; j < kSimd128Size; ++j) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == kSimd128Size) {
+ *opcode = entry.opcode;
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
+ uint8_t shuffle[kSimd128Size];
+ bool is_swizzle;
+ CanonicalizeShuffle(node, shuffle, &is_swizzle);
+ uint8_t shuffle32x4[4];
+ ArchOpcode opcode;
+ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ is_swizzle, &opcode)) {
+ VisitRRR(this, opcode, node);
+ return;
+ }
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ uint8_t offset;
+ Loong64OperandGenerator g(this);
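+ // A shuffle that selects a contiguous 16-byte window from the concatenation
+ // of the two inputs can be emitted as a single concat with a byte offset.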
+ if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
+ Emit(kLoong64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+ g.UseRegister(input0), g.UseImmediate(offset));
+ return;
+ }
+ if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kLoong64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
+ return;
+ }
+ Emit(kLoong64I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
+}
+#else
+void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
+#endif // V8_ENABLE_WEBASSEMBLY
+
+void InstructionSelector::VisitI8x16Swizzle(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+ // We don't want input 0 or input 1 to be the same as the output, since we
+ // will modify the output before doing the calculation.
+ Emit(kLoong64I8x16Swizzle, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
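+ // A word shift with amount 0 (slli.w) writes its 32-bit result sign-extended
+ // to 64 bits, which gives the extension we need here.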
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitUniqueRRR(this, kLoong64F32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitUniqueRRR(this, kLoong64F32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitUniqueRRR(this, kLoong64F64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitUniqueRRR(this, kLoong64F64x2Pmax, node);
+}
+
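+// The extended-multiply (ExtMul) visitors below are defined with empty bodies;
+// no dedicated LoongArch instruction is selected for them.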
+#define VISIT_EXT_MUL(OPCODE1, OPCODE2) \
+ void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2(Node* node) {} \
+ void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2(Node* node) {}
+
+VISIT_EXT_MUL(I64x2, I32x4S)
+VISIT_EXT_MUL(I64x2, I32x4U)
+VISIT_EXT_MUL(I32x4, I16x8S)
+VISIT_EXT_MUL(I32x4, I16x8U)
+VISIT_EXT_MUL(I16x8, I8x16S)
+VISIT_EXT_MUL(I16x8, I8x16U)
+#undef VISIT_EXT_MUL
+
+#define VISIT_EXTADD_PAIRWISE(OPCODE) \
+ void InstructionSelector::Visit##OPCODE(Node* node) { \
+ Loong64OperandGenerator g(this); \
+ Emit(kLoong64ExtAddPairwise, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0))); \
+ }
+VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16S)
+VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16U)
+VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8S)
+VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8U)
+#undef VISIT_EXTADD_PAIRWISE
+
+void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
+ int first_input_index,
+ Node* node) {
+ UNREACHABLE();
+}
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
+ return flags | MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
+}
+
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
+#undef SIMD_BINOP_LIST
+#undef SIMD_SHIFT_OP_LIST
+#undef SIMD_UNOP_LIST
+#undef SIMD_TYPE_LIST
+#undef TRACE_UNIMPL
+#undef TRACE
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 2b8197e7e6..736248c824 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -93,7 +93,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
- break;
}
UNREACHABLE();
}
@@ -313,16 +312,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
<< "\""; \
UNIMPLEMENTED();
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -614,31 +603,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- // difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerSystemPointer - 1))
- __ ComputeCodeStartAddress(kScratchReg);
- __ Move(kSpeculationPoisonRegister, kScratchReg);
- __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
- kScratchReg);
- __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerSystemPointer - 1);
- __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kSpeculationPoisonRegister);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -902,7 +866,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
i.InputDoubleRegister(0), DetermineStubCallMode());
break;
- case kArchStoreWithWriteBarrier: {
+ case kArchStoreWithWriteBarrier:
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -914,7 +879,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch0, scratch1, mode,
DetermineStubCallMode());
__ Addu(kScratchReg, object, index);
- __ sw(value, MemOperand(kScratchReg));
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ __ sw(value, MemOperand(kScratchReg));
+ } else {
+ DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
+ __ sync();
+ __ sw(value, MemOperand(kScratchReg));
+ __ sync();
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -938,10 +910,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@@ -1541,30 +1509,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsLbu:
__ lbu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsLb:
__ lb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSb:
__ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMipsLhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsLh:
__ lh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSh:
__ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1574,11 +1536,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsLw:
__ lw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSw:
__ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1658,7 +1618,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
default: {
UNREACHABLE();
- break;
}
}
} else {
@@ -1823,74 +1782,74 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_w(dst, kSimd128RegZero, dst);
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
break;
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
break;
- case kWord32AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
break;
- case kWord32AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
break;
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst); \
break; \
- case kWord32Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst); \
break; \
- case kWord32Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \
break; \
- case kWord32Atomic##op##Word32: \
+ case kAtomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(inst); \
break;
ATOMIC_BINOP_CASE(Add, Addu)
@@ -3675,7 +3634,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
- break;
}
} else if (instr->arch_opcode() == kMipsMulOvf) {
// Overflow occurs if overflow register is not zero
@@ -3688,7 +3646,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(kMipsMulOvf, condition);
- break;
}
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(condition);
@@ -3727,85 +3684,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- MipsOperandConverter i(this, instr);
- condition = NegateFlagsCondition(condition);
-
- switch (instr->arch_opcode()) {
- case kMipsCmp: {
- __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
- i.InputOperand(1),
- FlagsConditionToConditionCmp(condition));
- }
- return;
- case kMipsTst: {
- switch (condition) {
- case kEqual:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- case kNotEqual:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- default:
- UNREACHABLE();
- }
- }
- return;
- case kMipsAddOvf:
- case kMipsSubOvf: {
- // Overflow occurs if overflow register is negative
- __ Slt(kScratchReg2, kScratchReg, zero_reg);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMipsMulOvf: {
- // Overflow occurs if overflow register is not zero
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMipsCmpS:
- case kMipsCmpD: {
- bool predicate;
- FlagsConditionToConditionCmpFPU(&predicate, condition);
- if (predicate) {
- __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
- } else {
- __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
- }
- }
- return;
- default:
- UNREACHABLE();
- }
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -4130,7 +4008,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4333,7 +4210,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
case Constant::kInt64:
UNREACHABLE();
- break;
case Constant::kFloat64:
__ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
@@ -4357,7 +4233,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips.
- break;
}
if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 48635c9c15..aeb1756227 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -1444,8 +1444,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
AdduLatency(false) + AndLatency(false) + BranchShortLatency() + 1 +
SubuLatency() + AdduLatency();
}
- case kArchWordPoisonOnSpeculation:
- return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:
@@ -1657,19 +1655,15 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
switch (op->representation()) {
case MachineRepresentation::kFloat32:
return Latency::SWC1 + SubuLatency(false);
- break;
case MachineRepresentation::kFloat64:
return Sdc1Latency() + SubuLatency(false);
- break;
default: {
UNREACHABLE();
- break;
}
}
} else {
return PushRegisterLatency();
}
- break;
}
case kMipsPeek: {
if (instr->OutputAt(0)->IsFPRegister()) {
@@ -1682,7 +1676,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
} else {
return 1;
}
- break;
}
case kMipsStackClaim:
return SubuLatency(false);
@@ -1699,41 +1692,40 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
} else {
return 1;
}
- break;
}
case kMipsByteSwap32:
return ByteSwapSignedLatency();
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return 2;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return 3;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
return Word32AtomicExchangeLatency(true, 8);
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
return Word32AtomicExchangeLatency(false, 8);
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
return Word32AtomicExchangeLatency(true, 16);
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
return Word32AtomicExchangeLatency(false, 16);
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
return 1 + AdduLatency() + Ldc1Latency() + 1 + ScLatency(0) +
BranchShortLatency() + 1;
}
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
return Word32AtomicCompareExchangeLatency(true, 8);
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
return Word32AtomicCompareExchangeLatency(false, 8);
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
return Word32AtomicCompareExchangeLatency(true, 16);
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
return Word32AtomicCompareExchangeLatency(false, 16);
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
return AdduLatency() + 1 + LlLatency(0) + BranchShortLatency() + 1;
case kMipsTst:
return AndLatency(instr->InputAt(1)->IsRegister());
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index c823612246..477c791ca0 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -375,10 +375,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
@@ -393,8 +389,6 @@ void InstructionSelector::VisitLoad(Node* node) {
}
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1906,22 +1900,26 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ // TODO(mips-dev): Confirm whether there is any mips32 chip in use and
+ // support atomic loads of tagged values with barriers.
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
+ opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -1941,7 +1939,10 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ // TODO(mips-dev): Confirm whether there is any mips32 chip in use and
+ // support atomic stores of tagged values with barriers.
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ MachineRepresentation rep = store_params.representation();
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1949,13 +1950,16 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
+ opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
+ opcode = kAtomicStoreWord16;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
+ opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -1983,15 +1987,15 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2021,15 +2025,15 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2091,12 +2095,11 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 6fce103d24..f6fccd43d2 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -95,7 +95,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
- break;
}
UNREACHABLE();
}
@@ -321,16 +320,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -577,31 +566,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- // difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerSystemPointer - 1))
- __ ComputeCodeStartAddress(kScratchReg);
- __ Move(kSpeculationPoisonRegister, kScratchReg);
- __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
- kScratchReg);
- __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerSystemPointer - 1);
- __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kSpeculationPoisonRegister);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -803,7 +767,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
@@ -864,7 +827,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
i.InputDoubleRegister(0), DetermineStubCallMode());
break;
- case kArchStoreWithWriteBarrier: {
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -876,7 +840,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch0, scratch1, mode,
DetermineStubCallMode());
__ Daddu(kScratchReg, object, index);
- __ Sd(value, MemOperand(kScratchReg));
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ __ Sd(value, MemOperand(kScratchReg));
+ } else {
+ DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
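+ // The atomic variant brackets the store with full sync barriers to get the
+ // ordering guarantees an atomic store requires.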
+ __ sync();
+ __ Sd(value, MemOperand(kScratchReg));
+ __ sync();
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -900,10 +871,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@@ -1646,30 +1613,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Lbu:
__ Lbu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lb:
__ Lb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sb:
__ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Lhu:
__ Lhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lh:
__ Lh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sh:
__ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1679,27 +1640,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Lw:
__ Lw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lwu:
__ Lwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ld:
__ Ld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Uld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sw:
__ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1919,149 +1874,172 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ StoreLane(sz, src, i.InputUint8(1), i.MemoryOperand(2));
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
break;
- case kWord32AtomicLoadWord32:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
- break;
- case kMips64Word64AtomicLoadUint8:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
- break;
- case kMips64Word64AtomicLoadUint16:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
- break;
- case kMips64Word64AtomicLoadUint32:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
+ case kAtomicLoadWord32:
+ if (AtomicWidthField::decode(opcode) == AtomicWidth::kWord32)
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
+ else
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
break;
case kMips64Word64AtomicLoadUint64:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
break;
- case kWord32AtomicStoreWord8:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
- break;
- case kWord32AtomicStoreWord16:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
- break;
- case kWord32AtomicStoreWord32:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
- break;
- case kMips64Word64AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
break;
- case kMips64Word64AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
break;
- case kMips64Word64AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
break;
+ case kMips64StoreCompressTagged:
case kMips64Word64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kMips64Word64AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kMips64Word64AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kMips64Word64AtomicExchangeUint32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kMips64Word64AtomicExchangeUint64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicCompareExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicCompareExchangeWord32:
- __ sll(i.InputRegister(2), i.InputRegister(2), 0);
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kMips64Word64AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kMips64Word64AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicCompareExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kMips64Word64AtomicCompareExchangeUint32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicCompareExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ sll(i.InputRegister(2), i.InputRegister(2), 0);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kMips64Word64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
- break;
- ATOMIC_BINOP_CASE(Add, Addu)
- ATOMIC_BINOP_CASE(Sub, Subu)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
-#undef ATOMIC_BINOP_CASE
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kMips64Word64Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst, 64); \
- break; \
- case kMips64Word64Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst, 64); \
- break; \
- case kMips64Word64Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst, 64); \
- break; \
- case kMips64Word64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Word32: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kMips64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
break;
- ATOMIC_BINOP_CASE(Add, Daddu)
- ATOMIC_BINOP_CASE(Sub, Dsubu)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
+ ATOMIC_BINOP_CASE(Add, Addu, Daddu)
+ ATOMIC_BINOP_CASE(Sub, Subu, Dsubu)
+ ATOMIC_BINOP_CASE(And, And, And)
+ ATOMIC_BINOP_CASE(Or, Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor, Xor)
#undef ATOMIC_BINOP_CASE
case kMips64AssertEqual:
__ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
@@ -3851,7 +3829,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
- break;
}
} else if (instr->arch_opcode() == kMips64MulOvf) {
// Overflow occurs if overflow register is not zero
@@ -3864,7 +3841,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(kMipsMulOvf, condition);
- break;
}
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(condition);
@@ -3904,104 +3880,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- MipsOperandConverter i(this, instr);
- condition = NegateFlagsCondition(condition);
-
- switch (instr->arch_opcode()) {
- case kMips64Cmp: {
- __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
- i.InputOperand(1),
- FlagsConditionToConditionCmp(condition));
- }
- return;
- case kMips64Tst: {
- switch (condition) {
- case kEqual:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- case kNotEqual:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- default:
- UNREACHABLE();
- }
- }
- return;
- case kMips64Dadd:
- case kMips64Dsub: {
- // Check for overflow creates 1 or 0 for result.
- __ dsrl32(kScratchReg, i.OutputRegister(), 31);
- __ srl(kScratchReg2, i.OutputRegister(), 31);
- __ xor_(kScratchReg2, kScratchReg, kScratchReg2);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMips64DaddOvf:
- case kMips64DsubOvf: {
- // Overflow occurs if overflow register is negative
- __ Slt(kScratchReg2, kScratchReg, zero_reg);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMips64MulOvf: {
- // Overflow occurs if overflow register is not zero
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMips64CmpS:
- case kMips64CmpD: {
- bool predicate;
- FlagsConditionToConditionCmpFPU(&predicate, condition);
- if (predicate) {
- __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
- } else {
- __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
- }
- }
- return;
- default:
- UNREACHABLE();
- }
-}
-
#undef UNSUPPORTED_COND
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@@ -4340,7 +4218,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4568,7 +4445,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64.
- break;
}
if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
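
The MIPS64 hunks above stop keying on per-width opcodes (kWord32Atomic*/kMips64Word64Atomic*) and instead decode the operand width out of the instruction code via AtomicWidthField, then pick Ll/Sc or Lld/Scd accordingly. A minimal sketch of that encode/decode idea, with a hypothetical bit layout (V8's real AtomicWidthField lives elsewhere and may use different bits):

#include <cassert>
#include <cstdint>

enum class AtomicWidth { kWord32, kWord64 };

using InstructionCode = uint32_t;

// Hypothetical layout: opcode in bits 0..15, atomic width in bit 16.
constexpr InstructionCode EncodeWidth(InstructionCode opcode, AtomicWidth w) {
  return opcode | (static_cast<InstructionCode>(w) << 16);
}
constexpr AtomicWidth DecodeWidth(InstructionCode code) {
  return static_cast<AtomicWidth>((code >> 16) & 1);
}

int main() {
  InstructionCode add_u8 = 0x0042;  // stand-in for kAtomicAddUint8
  InstructionCode tagged = EncodeWidth(add_u8, AtomicWidth::kWord64);
  assert(DecodeWidth(tagged) == AtomicWidth::kWord64);
  // The code generator then chooses the Ll/Sc (32-bit) or Lld/Scd (64-bit)
  // assembly sequence based on the decoded width, as in ATOMIC_BINOP_CASE above.
}
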
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index e1b40a4be5..30d7f5af75 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -11,419 +11,393 @@ namespace compiler {
// MIPS64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Mips64Add) \
- V(Mips64Dadd) \
- V(Mips64DaddOvf) \
- V(Mips64Sub) \
- V(Mips64Dsub) \
- V(Mips64DsubOvf) \
- V(Mips64Mul) \
- V(Mips64MulOvf) \
- V(Mips64MulHigh) \
- V(Mips64DMulHigh) \
- V(Mips64MulHighU) \
- V(Mips64Dmul) \
- V(Mips64Div) \
- V(Mips64Ddiv) \
- V(Mips64DivU) \
- V(Mips64DdivU) \
- V(Mips64Mod) \
- V(Mips64Dmod) \
- V(Mips64ModU) \
- V(Mips64DmodU) \
- V(Mips64And) \
- V(Mips64And32) \
- V(Mips64Or) \
- V(Mips64Or32) \
- V(Mips64Nor) \
- V(Mips64Nor32) \
- V(Mips64Xor) \
- V(Mips64Xor32) \
- V(Mips64Clz) \
- V(Mips64Lsa) \
- V(Mips64Dlsa) \
- V(Mips64Shl) \
- V(Mips64Shr) \
- V(Mips64Sar) \
- V(Mips64Ext) \
- V(Mips64Ins) \
- V(Mips64Dext) \
- V(Mips64Dins) \
- V(Mips64Dclz) \
- V(Mips64Ctz) \
- V(Mips64Dctz) \
- V(Mips64Popcnt) \
- V(Mips64Dpopcnt) \
- V(Mips64Dshl) \
- V(Mips64Dshr) \
- V(Mips64Dsar) \
- V(Mips64Ror) \
- V(Mips64Dror) \
- V(Mips64Mov) \
- V(Mips64Tst) \
- V(Mips64Cmp) \
- V(Mips64CmpS) \
- V(Mips64AddS) \
- V(Mips64SubS) \
- V(Mips64MulS) \
- V(Mips64DivS) \
- V(Mips64AbsS) \
- V(Mips64NegS) \
- V(Mips64SqrtS) \
- V(Mips64MaxS) \
- V(Mips64MinS) \
- V(Mips64CmpD) \
- V(Mips64AddD) \
- V(Mips64SubD) \
- V(Mips64MulD) \
- V(Mips64DivD) \
- V(Mips64ModD) \
- V(Mips64AbsD) \
- V(Mips64NegD) \
- V(Mips64SqrtD) \
- V(Mips64MaxD) \
- V(Mips64MinD) \
- V(Mips64Float64RoundDown) \
- V(Mips64Float64RoundTruncate) \
- V(Mips64Float64RoundUp) \
- V(Mips64Float64RoundTiesEven) \
- V(Mips64Float32RoundDown) \
- V(Mips64Float32RoundTruncate) \
- V(Mips64Float32RoundUp) \
- V(Mips64Float32RoundTiesEven) \
- V(Mips64CvtSD) \
- V(Mips64CvtDS) \
- V(Mips64TruncWD) \
- V(Mips64RoundWD) \
- V(Mips64FloorWD) \
- V(Mips64CeilWD) \
- V(Mips64TruncWS) \
- V(Mips64RoundWS) \
- V(Mips64FloorWS) \
- V(Mips64CeilWS) \
- V(Mips64TruncLS) \
- V(Mips64TruncLD) \
- V(Mips64TruncUwD) \
- V(Mips64TruncUwS) \
- V(Mips64TruncUlS) \
- V(Mips64TruncUlD) \
- V(Mips64CvtDW) \
- V(Mips64CvtSL) \
- V(Mips64CvtSW) \
- V(Mips64CvtSUw) \
- V(Mips64CvtSUl) \
- V(Mips64CvtDL) \
- V(Mips64CvtDUw) \
- V(Mips64CvtDUl) \
- V(Mips64Lb) \
- V(Mips64Lbu) \
- V(Mips64Sb) \
- V(Mips64Lh) \
- V(Mips64Ulh) \
- V(Mips64Lhu) \
- V(Mips64Ulhu) \
- V(Mips64Sh) \
- V(Mips64Ush) \
- V(Mips64Ld) \
- V(Mips64Uld) \
- V(Mips64Lw) \
- V(Mips64Ulw) \
- V(Mips64Lwu) \
- V(Mips64Ulwu) \
- V(Mips64Sw) \
- V(Mips64Usw) \
- V(Mips64Sd) \
- V(Mips64Usd) \
- V(Mips64Lwc1) \
- V(Mips64Ulwc1) \
- V(Mips64Swc1) \
- V(Mips64Uswc1) \
- V(Mips64Ldc1) \
- V(Mips64Uldc1) \
- V(Mips64Sdc1) \
- V(Mips64Usdc1) \
- V(Mips64BitcastDL) \
- V(Mips64BitcastLD) \
- V(Mips64Float64ExtractLowWord32) \
- V(Mips64Float64ExtractHighWord32) \
- V(Mips64Float64InsertLowWord32) \
- V(Mips64Float64InsertHighWord32) \
- V(Mips64Float32Max) \
- V(Mips64Float64Max) \
- V(Mips64Float32Min) \
- V(Mips64Float64Min) \
- V(Mips64Float64SilenceNaN) \
- V(Mips64Push) \
- V(Mips64Peek) \
- V(Mips64StoreToStackSlot) \
- V(Mips64ByteSwap64) \
- V(Mips64ByteSwap32) \
- V(Mips64StackClaim) \
- V(Mips64Seb) \
- V(Mips64Seh) \
- V(Mips64Sync) \
- V(Mips64AssertEqual) \
- V(Mips64S128Const) \
- V(Mips64S128Zero) \
- V(Mips64S128AllOnes) \
- V(Mips64I32x4Splat) \
- V(Mips64I32x4ExtractLane) \
- V(Mips64I32x4ReplaceLane) \
- V(Mips64I32x4Add) \
- V(Mips64I32x4Sub) \
- V(Mips64F64x2Abs) \
- V(Mips64F64x2Neg) \
- V(Mips64F32x4Splat) \
- V(Mips64F32x4ExtractLane) \
- V(Mips64F32x4ReplaceLane) \
- V(Mips64F32x4SConvertI32x4) \
- V(Mips64F32x4UConvertI32x4) \
- V(Mips64I32x4Mul) \
- V(Mips64I32x4MaxS) \
- V(Mips64I32x4MinS) \
- V(Mips64I32x4Eq) \
- V(Mips64I32x4Ne) \
- V(Mips64I32x4Shl) \
- V(Mips64I32x4ShrS) \
- V(Mips64I32x4ShrU) \
- V(Mips64I32x4MaxU) \
- V(Mips64I32x4MinU) \
- V(Mips64F64x2Sqrt) \
- V(Mips64F64x2Add) \
- V(Mips64F64x2Sub) \
- V(Mips64F64x2Mul) \
- V(Mips64F64x2Div) \
- V(Mips64F64x2Min) \
- V(Mips64F64x2Max) \
- V(Mips64F64x2Eq) \
- V(Mips64F64x2Ne) \
- V(Mips64F64x2Lt) \
- V(Mips64F64x2Le) \
- V(Mips64F64x2Splat) \
- V(Mips64F64x2ExtractLane) \
- V(Mips64F64x2ReplaceLane) \
- V(Mips64F64x2Pmin) \
- V(Mips64F64x2Pmax) \
- V(Mips64F64x2Ceil) \
- V(Mips64F64x2Floor) \
- V(Mips64F64x2Trunc) \
- V(Mips64F64x2NearestInt) \
- V(Mips64F64x2ConvertLowI32x4S) \
- V(Mips64F64x2ConvertLowI32x4U) \
- V(Mips64F64x2PromoteLowF32x4) \
- V(Mips64I64x2Splat) \
- V(Mips64I64x2ExtractLane) \
- V(Mips64I64x2ReplaceLane) \
- V(Mips64I64x2Add) \
- V(Mips64I64x2Sub) \
- V(Mips64I64x2Mul) \
- V(Mips64I64x2Neg) \
- V(Mips64I64x2Shl) \
- V(Mips64I64x2ShrS) \
- V(Mips64I64x2ShrU) \
- V(Mips64I64x2BitMask) \
- V(Mips64I64x2Eq) \
- V(Mips64I64x2Ne) \
- V(Mips64I64x2GtS) \
- V(Mips64I64x2GeS) \
- V(Mips64I64x2Abs) \
- V(Mips64I64x2SConvertI32x4Low) \
- V(Mips64I64x2SConvertI32x4High) \
- V(Mips64I64x2UConvertI32x4Low) \
- V(Mips64I64x2UConvertI32x4High) \
- V(Mips64ExtMulLow) \
- V(Mips64ExtMulHigh) \
- V(Mips64ExtAddPairwise) \
- V(Mips64F32x4Abs) \
- V(Mips64F32x4Neg) \
- V(Mips64F32x4Sqrt) \
- V(Mips64F32x4RecipApprox) \
- V(Mips64F32x4RecipSqrtApprox) \
- V(Mips64F32x4Add) \
- V(Mips64F32x4Sub) \
- V(Mips64F32x4Mul) \
- V(Mips64F32x4Div) \
- V(Mips64F32x4Max) \
- V(Mips64F32x4Min) \
- V(Mips64F32x4Eq) \
- V(Mips64F32x4Ne) \
- V(Mips64F32x4Lt) \
- V(Mips64F32x4Le) \
- V(Mips64F32x4Pmin) \
- V(Mips64F32x4Pmax) \
- V(Mips64F32x4Ceil) \
- V(Mips64F32x4Floor) \
- V(Mips64F32x4Trunc) \
- V(Mips64F32x4NearestInt) \
- V(Mips64F32x4DemoteF64x2Zero) \
- V(Mips64I32x4SConvertF32x4) \
- V(Mips64I32x4UConvertF32x4) \
- V(Mips64I32x4Neg) \
- V(Mips64I32x4GtS) \
- V(Mips64I32x4GeS) \
- V(Mips64I32x4GtU) \
- V(Mips64I32x4GeU) \
- V(Mips64I32x4Abs) \
- V(Mips64I32x4BitMask) \
- V(Mips64I32x4DotI16x8S) \
- V(Mips64I32x4TruncSatF64x2SZero) \
- V(Mips64I32x4TruncSatF64x2UZero) \
- V(Mips64I16x8Splat) \
- V(Mips64I16x8ExtractLaneU) \
- V(Mips64I16x8ExtractLaneS) \
- V(Mips64I16x8ReplaceLane) \
- V(Mips64I16x8Neg) \
- V(Mips64I16x8Shl) \
- V(Mips64I16x8ShrS) \
- V(Mips64I16x8ShrU) \
- V(Mips64I16x8Add) \
- V(Mips64I16x8AddSatS) \
- V(Mips64I16x8Sub) \
- V(Mips64I16x8SubSatS) \
- V(Mips64I16x8Mul) \
- V(Mips64I16x8MaxS) \
- V(Mips64I16x8MinS) \
- V(Mips64I16x8Eq) \
- V(Mips64I16x8Ne) \
- V(Mips64I16x8GtS) \
- V(Mips64I16x8GeS) \
- V(Mips64I16x8AddSatU) \
- V(Mips64I16x8SubSatU) \
- V(Mips64I16x8MaxU) \
- V(Mips64I16x8MinU) \
- V(Mips64I16x8GtU) \
- V(Mips64I16x8GeU) \
- V(Mips64I16x8RoundingAverageU) \
- V(Mips64I16x8Abs) \
- V(Mips64I16x8BitMask) \
- V(Mips64I16x8Q15MulRSatS) \
- V(Mips64I8x16Splat) \
- V(Mips64I8x16ExtractLaneU) \
- V(Mips64I8x16ExtractLaneS) \
- V(Mips64I8x16ReplaceLane) \
- V(Mips64I8x16Neg) \
- V(Mips64I8x16Shl) \
- V(Mips64I8x16ShrS) \
- V(Mips64I8x16Add) \
- V(Mips64I8x16AddSatS) \
- V(Mips64I8x16Sub) \
- V(Mips64I8x16SubSatS) \
- V(Mips64I8x16MaxS) \
- V(Mips64I8x16MinS) \
- V(Mips64I8x16Eq) \
- V(Mips64I8x16Ne) \
- V(Mips64I8x16GtS) \
- V(Mips64I8x16GeS) \
- V(Mips64I8x16ShrU) \
- V(Mips64I8x16AddSatU) \
- V(Mips64I8x16SubSatU) \
- V(Mips64I8x16MaxU) \
- V(Mips64I8x16MinU) \
- V(Mips64I8x16GtU) \
- V(Mips64I8x16GeU) \
- V(Mips64I8x16RoundingAverageU) \
- V(Mips64I8x16Abs) \
- V(Mips64I8x16Popcnt) \
- V(Mips64I8x16BitMask) \
- V(Mips64S128And) \
- V(Mips64S128Or) \
- V(Mips64S128Xor) \
- V(Mips64S128Not) \
- V(Mips64S128Select) \
- V(Mips64S128AndNot) \
- V(Mips64I64x2AllTrue) \
- V(Mips64I32x4AllTrue) \
- V(Mips64I16x8AllTrue) \
- V(Mips64I8x16AllTrue) \
- V(Mips64V128AnyTrue) \
- V(Mips64S32x4InterleaveRight) \
- V(Mips64S32x4InterleaveLeft) \
- V(Mips64S32x4PackEven) \
- V(Mips64S32x4PackOdd) \
- V(Mips64S32x4InterleaveEven) \
- V(Mips64S32x4InterleaveOdd) \
- V(Mips64S32x4Shuffle) \
- V(Mips64S16x8InterleaveRight) \
- V(Mips64S16x8InterleaveLeft) \
- V(Mips64S16x8PackEven) \
- V(Mips64S16x8PackOdd) \
- V(Mips64S16x8InterleaveEven) \
- V(Mips64S16x8InterleaveOdd) \
- V(Mips64S16x4Reverse) \
- V(Mips64S16x2Reverse) \
- V(Mips64S8x16InterleaveRight) \
- V(Mips64S8x16InterleaveLeft) \
- V(Mips64S8x16PackEven) \
- V(Mips64S8x16PackOdd) \
- V(Mips64S8x16InterleaveEven) \
- V(Mips64S8x16InterleaveOdd) \
- V(Mips64I8x16Shuffle) \
- V(Mips64I8x16Swizzle) \
- V(Mips64S8x16Concat) \
- V(Mips64S8x8Reverse) \
- V(Mips64S8x4Reverse) \
- V(Mips64S8x2Reverse) \
- V(Mips64S128LoadSplat) \
- V(Mips64S128Load8x8S) \
- V(Mips64S128Load8x8U) \
- V(Mips64S128Load16x4S) \
- V(Mips64S128Load16x4U) \
- V(Mips64S128Load32x2S) \
- V(Mips64S128Load32x2U) \
- V(Mips64S128Load32Zero) \
- V(Mips64S128Load64Zero) \
- V(Mips64S128LoadLane) \
- V(Mips64S128StoreLane) \
- V(Mips64MsaLd) \
- V(Mips64MsaSt) \
- V(Mips64I32x4SConvertI16x8Low) \
- V(Mips64I32x4SConvertI16x8High) \
- V(Mips64I32x4UConvertI16x8Low) \
- V(Mips64I32x4UConvertI16x8High) \
- V(Mips64I16x8SConvertI8x16Low) \
- V(Mips64I16x8SConvertI8x16High) \
- V(Mips64I16x8SConvertI32x4) \
- V(Mips64I16x8UConvertI32x4) \
- V(Mips64I16x8UConvertI8x16Low) \
- V(Mips64I16x8UConvertI8x16High) \
- V(Mips64I8x16SConvertI16x8) \
- V(Mips64I8x16UConvertI16x8) \
- V(Mips64Word64AtomicLoadUint8) \
- V(Mips64Word64AtomicLoadUint16) \
- V(Mips64Word64AtomicLoadUint32) \
- V(Mips64Word64AtomicLoadUint64) \
- V(Mips64Word64AtomicStoreWord8) \
- V(Mips64Word64AtomicStoreWord16) \
- V(Mips64Word64AtomicStoreWord32) \
- V(Mips64Word64AtomicStoreWord64) \
- V(Mips64Word64AtomicAddUint8) \
- V(Mips64Word64AtomicAddUint16) \
- V(Mips64Word64AtomicAddUint32) \
- V(Mips64Word64AtomicAddUint64) \
- V(Mips64Word64AtomicSubUint8) \
- V(Mips64Word64AtomicSubUint16) \
- V(Mips64Word64AtomicSubUint32) \
- V(Mips64Word64AtomicSubUint64) \
- V(Mips64Word64AtomicAndUint8) \
- V(Mips64Word64AtomicAndUint16) \
- V(Mips64Word64AtomicAndUint32) \
- V(Mips64Word64AtomicAndUint64) \
- V(Mips64Word64AtomicOrUint8) \
- V(Mips64Word64AtomicOrUint16) \
- V(Mips64Word64AtomicOrUint32) \
- V(Mips64Word64AtomicOrUint64) \
- V(Mips64Word64AtomicXorUint8) \
- V(Mips64Word64AtomicXorUint16) \
- V(Mips64Word64AtomicXorUint32) \
- V(Mips64Word64AtomicXorUint64) \
- V(Mips64Word64AtomicExchangeUint8) \
- V(Mips64Word64AtomicExchangeUint16) \
- V(Mips64Word64AtomicExchangeUint32) \
- V(Mips64Word64AtomicExchangeUint64) \
- V(Mips64Word64AtomicCompareExchangeUint8) \
- V(Mips64Word64AtomicCompareExchangeUint16) \
- V(Mips64Word64AtomicCompareExchangeUint32) \
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Mips64Add) \
+ V(Mips64Dadd) \
+ V(Mips64DaddOvf) \
+ V(Mips64Sub) \
+ V(Mips64Dsub) \
+ V(Mips64DsubOvf) \
+ V(Mips64Mul) \
+ V(Mips64MulOvf) \
+ V(Mips64MulHigh) \
+ V(Mips64DMulHigh) \
+ V(Mips64MulHighU) \
+ V(Mips64Dmul) \
+ V(Mips64Div) \
+ V(Mips64Ddiv) \
+ V(Mips64DivU) \
+ V(Mips64DdivU) \
+ V(Mips64Mod) \
+ V(Mips64Dmod) \
+ V(Mips64ModU) \
+ V(Mips64DmodU) \
+ V(Mips64And) \
+ V(Mips64And32) \
+ V(Mips64Or) \
+ V(Mips64Or32) \
+ V(Mips64Nor) \
+ V(Mips64Nor32) \
+ V(Mips64Xor) \
+ V(Mips64Xor32) \
+ V(Mips64Clz) \
+ V(Mips64Lsa) \
+ V(Mips64Dlsa) \
+ V(Mips64Shl) \
+ V(Mips64Shr) \
+ V(Mips64Sar) \
+ V(Mips64Ext) \
+ V(Mips64Ins) \
+ V(Mips64Dext) \
+ V(Mips64Dins) \
+ V(Mips64Dclz) \
+ V(Mips64Ctz) \
+ V(Mips64Dctz) \
+ V(Mips64Popcnt) \
+ V(Mips64Dpopcnt) \
+ V(Mips64Dshl) \
+ V(Mips64Dshr) \
+ V(Mips64Dsar) \
+ V(Mips64Ror) \
+ V(Mips64Dror) \
+ V(Mips64Mov) \
+ V(Mips64Tst) \
+ V(Mips64Cmp) \
+ V(Mips64CmpS) \
+ V(Mips64AddS) \
+ V(Mips64SubS) \
+ V(Mips64MulS) \
+ V(Mips64DivS) \
+ V(Mips64AbsS) \
+ V(Mips64NegS) \
+ V(Mips64SqrtS) \
+ V(Mips64MaxS) \
+ V(Mips64MinS) \
+ V(Mips64CmpD) \
+ V(Mips64AddD) \
+ V(Mips64SubD) \
+ V(Mips64MulD) \
+ V(Mips64DivD) \
+ V(Mips64ModD) \
+ V(Mips64AbsD) \
+ V(Mips64NegD) \
+ V(Mips64SqrtD) \
+ V(Mips64MaxD) \
+ V(Mips64MinD) \
+ V(Mips64Float64RoundDown) \
+ V(Mips64Float64RoundTruncate) \
+ V(Mips64Float64RoundUp) \
+ V(Mips64Float64RoundTiesEven) \
+ V(Mips64Float32RoundDown) \
+ V(Mips64Float32RoundTruncate) \
+ V(Mips64Float32RoundUp) \
+ V(Mips64Float32RoundTiesEven) \
+ V(Mips64CvtSD) \
+ V(Mips64CvtDS) \
+ V(Mips64TruncWD) \
+ V(Mips64RoundWD) \
+ V(Mips64FloorWD) \
+ V(Mips64CeilWD) \
+ V(Mips64TruncWS) \
+ V(Mips64RoundWS) \
+ V(Mips64FloorWS) \
+ V(Mips64CeilWS) \
+ V(Mips64TruncLS) \
+ V(Mips64TruncLD) \
+ V(Mips64TruncUwD) \
+ V(Mips64TruncUwS) \
+ V(Mips64TruncUlS) \
+ V(Mips64TruncUlD) \
+ V(Mips64CvtDW) \
+ V(Mips64CvtSL) \
+ V(Mips64CvtSW) \
+ V(Mips64CvtSUw) \
+ V(Mips64CvtSUl) \
+ V(Mips64CvtDL) \
+ V(Mips64CvtDUw) \
+ V(Mips64CvtDUl) \
+ V(Mips64Lb) \
+ V(Mips64Lbu) \
+ V(Mips64Sb) \
+ V(Mips64Lh) \
+ V(Mips64Ulh) \
+ V(Mips64Lhu) \
+ V(Mips64Ulhu) \
+ V(Mips64Sh) \
+ V(Mips64Ush) \
+ V(Mips64Ld) \
+ V(Mips64Uld) \
+ V(Mips64Lw) \
+ V(Mips64Ulw) \
+ V(Mips64Lwu) \
+ V(Mips64Ulwu) \
+ V(Mips64Sw) \
+ V(Mips64Usw) \
+ V(Mips64Sd) \
+ V(Mips64Usd) \
+ V(Mips64Lwc1) \
+ V(Mips64Ulwc1) \
+ V(Mips64Swc1) \
+ V(Mips64Uswc1) \
+ V(Mips64Ldc1) \
+ V(Mips64Uldc1) \
+ V(Mips64Sdc1) \
+ V(Mips64Usdc1) \
+ V(Mips64BitcastDL) \
+ V(Mips64BitcastLD) \
+ V(Mips64Float64ExtractLowWord32) \
+ V(Mips64Float64ExtractHighWord32) \
+ V(Mips64Float64InsertLowWord32) \
+ V(Mips64Float64InsertHighWord32) \
+ V(Mips64Float32Max) \
+ V(Mips64Float64Max) \
+ V(Mips64Float32Min) \
+ V(Mips64Float64Min) \
+ V(Mips64Float64SilenceNaN) \
+ V(Mips64Push) \
+ V(Mips64Peek) \
+ V(Mips64StoreToStackSlot) \
+ V(Mips64ByteSwap64) \
+ V(Mips64ByteSwap32) \
+ V(Mips64StackClaim) \
+ V(Mips64Seb) \
+ V(Mips64Seh) \
+ V(Mips64Sync) \
+ V(Mips64AssertEqual) \
+ V(Mips64S128Const) \
+ V(Mips64S128Zero) \
+ V(Mips64S128AllOnes) \
+ V(Mips64I32x4Splat) \
+ V(Mips64I32x4ExtractLane) \
+ V(Mips64I32x4ReplaceLane) \
+ V(Mips64I32x4Add) \
+ V(Mips64I32x4Sub) \
+ V(Mips64F64x2Abs) \
+ V(Mips64F64x2Neg) \
+ V(Mips64F32x4Splat) \
+ V(Mips64F32x4ExtractLane) \
+ V(Mips64F32x4ReplaceLane) \
+ V(Mips64F32x4SConvertI32x4) \
+ V(Mips64F32x4UConvertI32x4) \
+ V(Mips64I32x4Mul) \
+ V(Mips64I32x4MaxS) \
+ V(Mips64I32x4MinS) \
+ V(Mips64I32x4Eq) \
+ V(Mips64I32x4Ne) \
+ V(Mips64I32x4Shl) \
+ V(Mips64I32x4ShrS) \
+ V(Mips64I32x4ShrU) \
+ V(Mips64I32x4MaxU) \
+ V(Mips64I32x4MinU) \
+ V(Mips64F64x2Sqrt) \
+ V(Mips64F64x2Add) \
+ V(Mips64F64x2Sub) \
+ V(Mips64F64x2Mul) \
+ V(Mips64F64x2Div) \
+ V(Mips64F64x2Min) \
+ V(Mips64F64x2Max) \
+ V(Mips64F64x2Eq) \
+ V(Mips64F64x2Ne) \
+ V(Mips64F64x2Lt) \
+ V(Mips64F64x2Le) \
+ V(Mips64F64x2Splat) \
+ V(Mips64F64x2ExtractLane) \
+ V(Mips64F64x2ReplaceLane) \
+ V(Mips64F64x2Pmin) \
+ V(Mips64F64x2Pmax) \
+ V(Mips64F64x2Ceil) \
+ V(Mips64F64x2Floor) \
+ V(Mips64F64x2Trunc) \
+ V(Mips64F64x2NearestInt) \
+ V(Mips64F64x2ConvertLowI32x4S) \
+ V(Mips64F64x2ConvertLowI32x4U) \
+ V(Mips64F64x2PromoteLowF32x4) \
+ V(Mips64I64x2Splat) \
+ V(Mips64I64x2ExtractLane) \
+ V(Mips64I64x2ReplaceLane) \
+ V(Mips64I64x2Add) \
+ V(Mips64I64x2Sub) \
+ V(Mips64I64x2Mul) \
+ V(Mips64I64x2Neg) \
+ V(Mips64I64x2Shl) \
+ V(Mips64I64x2ShrS) \
+ V(Mips64I64x2ShrU) \
+ V(Mips64I64x2BitMask) \
+ V(Mips64I64x2Eq) \
+ V(Mips64I64x2Ne) \
+ V(Mips64I64x2GtS) \
+ V(Mips64I64x2GeS) \
+ V(Mips64I64x2Abs) \
+ V(Mips64I64x2SConvertI32x4Low) \
+ V(Mips64I64x2SConvertI32x4High) \
+ V(Mips64I64x2UConvertI32x4Low) \
+ V(Mips64I64x2UConvertI32x4High) \
+ V(Mips64ExtMulLow) \
+ V(Mips64ExtMulHigh) \
+ V(Mips64ExtAddPairwise) \
+ V(Mips64F32x4Abs) \
+ V(Mips64F32x4Neg) \
+ V(Mips64F32x4Sqrt) \
+ V(Mips64F32x4RecipApprox) \
+ V(Mips64F32x4RecipSqrtApprox) \
+ V(Mips64F32x4Add) \
+ V(Mips64F32x4Sub) \
+ V(Mips64F32x4Mul) \
+ V(Mips64F32x4Div) \
+ V(Mips64F32x4Max) \
+ V(Mips64F32x4Min) \
+ V(Mips64F32x4Eq) \
+ V(Mips64F32x4Ne) \
+ V(Mips64F32x4Lt) \
+ V(Mips64F32x4Le) \
+ V(Mips64F32x4Pmin) \
+ V(Mips64F32x4Pmax) \
+ V(Mips64F32x4Ceil) \
+ V(Mips64F32x4Floor) \
+ V(Mips64F32x4Trunc) \
+ V(Mips64F32x4NearestInt) \
+ V(Mips64F32x4DemoteF64x2Zero) \
+ V(Mips64I32x4SConvertF32x4) \
+ V(Mips64I32x4UConvertF32x4) \
+ V(Mips64I32x4Neg) \
+ V(Mips64I32x4GtS) \
+ V(Mips64I32x4GeS) \
+ V(Mips64I32x4GtU) \
+ V(Mips64I32x4GeU) \
+ V(Mips64I32x4Abs) \
+ V(Mips64I32x4BitMask) \
+ V(Mips64I32x4DotI16x8S) \
+ V(Mips64I32x4TruncSatF64x2SZero) \
+ V(Mips64I32x4TruncSatF64x2UZero) \
+ V(Mips64I16x8Splat) \
+ V(Mips64I16x8ExtractLaneU) \
+ V(Mips64I16x8ExtractLaneS) \
+ V(Mips64I16x8ReplaceLane) \
+ V(Mips64I16x8Neg) \
+ V(Mips64I16x8Shl) \
+ V(Mips64I16x8ShrS) \
+ V(Mips64I16x8ShrU) \
+ V(Mips64I16x8Add) \
+ V(Mips64I16x8AddSatS) \
+ V(Mips64I16x8Sub) \
+ V(Mips64I16x8SubSatS) \
+ V(Mips64I16x8Mul) \
+ V(Mips64I16x8MaxS) \
+ V(Mips64I16x8MinS) \
+ V(Mips64I16x8Eq) \
+ V(Mips64I16x8Ne) \
+ V(Mips64I16x8GtS) \
+ V(Mips64I16x8GeS) \
+ V(Mips64I16x8AddSatU) \
+ V(Mips64I16x8SubSatU) \
+ V(Mips64I16x8MaxU) \
+ V(Mips64I16x8MinU) \
+ V(Mips64I16x8GtU) \
+ V(Mips64I16x8GeU) \
+ V(Mips64I16x8RoundingAverageU) \
+ V(Mips64I16x8Abs) \
+ V(Mips64I16x8BitMask) \
+ V(Mips64I16x8Q15MulRSatS) \
+ V(Mips64I8x16Splat) \
+ V(Mips64I8x16ExtractLaneU) \
+ V(Mips64I8x16ExtractLaneS) \
+ V(Mips64I8x16ReplaceLane) \
+ V(Mips64I8x16Neg) \
+ V(Mips64I8x16Shl) \
+ V(Mips64I8x16ShrS) \
+ V(Mips64I8x16Add) \
+ V(Mips64I8x16AddSatS) \
+ V(Mips64I8x16Sub) \
+ V(Mips64I8x16SubSatS) \
+ V(Mips64I8x16MaxS) \
+ V(Mips64I8x16MinS) \
+ V(Mips64I8x16Eq) \
+ V(Mips64I8x16Ne) \
+ V(Mips64I8x16GtS) \
+ V(Mips64I8x16GeS) \
+ V(Mips64I8x16ShrU) \
+ V(Mips64I8x16AddSatU) \
+ V(Mips64I8x16SubSatU) \
+ V(Mips64I8x16MaxU) \
+ V(Mips64I8x16MinU) \
+ V(Mips64I8x16GtU) \
+ V(Mips64I8x16GeU) \
+ V(Mips64I8x16RoundingAverageU) \
+ V(Mips64I8x16Abs) \
+ V(Mips64I8x16Popcnt) \
+ V(Mips64I8x16BitMask) \
+ V(Mips64S128And) \
+ V(Mips64S128Or) \
+ V(Mips64S128Xor) \
+ V(Mips64S128Not) \
+ V(Mips64S128Select) \
+ V(Mips64S128AndNot) \
+ V(Mips64I64x2AllTrue) \
+ V(Mips64I32x4AllTrue) \
+ V(Mips64I16x8AllTrue) \
+ V(Mips64I8x16AllTrue) \
+ V(Mips64V128AnyTrue) \
+ V(Mips64S32x4InterleaveRight) \
+ V(Mips64S32x4InterleaveLeft) \
+ V(Mips64S32x4PackEven) \
+ V(Mips64S32x4PackOdd) \
+ V(Mips64S32x4InterleaveEven) \
+ V(Mips64S32x4InterleaveOdd) \
+ V(Mips64S32x4Shuffle) \
+ V(Mips64S16x8InterleaveRight) \
+ V(Mips64S16x8InterleaveLeft) \
+ V(Mips64S16x8PackEven) \
+ V(Mips64S16x8PackOdd) \
+ V(Mips64S16x8InterleaveEven) \
+ V(Mips64S16x8InterleaveOdd) \
+ V(Mips64S16x4Reverse) \
+ V(Mips64S16x2Reverse) \
+ V(Mips64S8x16InterleaveRight) \
+ V(Mips64S8x16InterleaveLeft) \
+ V(Mips64S8x16PackEven) \
+ V(Mips64S8x16PackOdd) \
+ V(Mips64S8x16InterleaveEven) \
+ V(Mips64S8x16InterleaveOdd) \
+ V(Mips64I8x16Shuffle) \
+ V(Mips64I8x16Swizzle) \
+ V(Mips64S8x16Concat) \
+ V(Mips64S8x8Reverse) \
+ V(Mips64S8x4Reverse) \
+ V(Mips64S8x2Reverse) \
+ V(Mips64S128LoadSplat) \
+ V(Mips64S128Load8x8S) \
+ V(Mips64S128Load8x8U) \
+ V(Mips64S128Load16x4S) \
+ V(Mips64S128Load16x4U) \
+ V(Mips64S128Load32x2S) \
+ V(Mips64S128Load32x2U) \
+ V(Mips64S128Load32Zero) \
+ V(Mips64S128Load64Zero) \
+ V(Mips64S128LoadLane) \
+ V(Mips64S128StoreLane) \
+ V(Mips64MsaLd) \
+ V(Mips64MsaSt) \
+ V(Mips64I32x4SConvertI16x8Low) \
+ V(Mips64I32x4SConvertI16x8High) \
+ V(Mips64I32x4UConvertI16x8Low) \
+ V(Mips64I32x4UConvertI16x8High) \
+ V(Mips64I16x8SConvertI8x16Low) \
+ V(Mips64I16x8SConvertI8x16High) \
+ V(Mips64I16x8SConvertI32x4) \
+ V(Mips64I16x8UConvertI32x4) \
+ V(Mips64I16x8UConvertI8x16Low) \
+ V(Mips64I16x8UConvertI8x16High) \
+ V(Mips64I8x16SConvertI16x8) \
+ V(Mips64I8x16UConvertI16x8) \
+ V(Mips64StoreCompressTagged) \
+ V(Mips64Word64AtomicLoadUint64) \
+ V(Mips64Word64AtomicStoreWord64) \
+ V(Mips64Word64AtomicAddUint64) \
+ V(Mips64Word64AtomicSubUint64) \
+ V(Mips64Word64AtomicAndUint64) \
+ V(Mips64Word64AtomicOrUint64) \
+ V(Mips64Word64AtomicXorUint64) \
+ V(Mips64Word64AtomicExchangeUint64) \
V(Mips64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
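
TARGET_ARCH_OPCODE_LIST above is an X-macro: one list, expanded with different definitions of V to generate the opcode enum, its printable names, and so on, all kept in sync from a single source. A small self-contained illustration of the pattern (toy opcodes, not V8's list):

#include <cstdio>

#define TOY_OPCODE_LIST(V) \
  V(ToyAdd)                \
  V(ToySub)                \
  V(ToyAtomicLoad)

// Expansion 1: the enum.
#define DECLARE_ENUM(name) k##name,
enum ToyOpcode { TOY_OPCODE_LIST(DECLARE_ENUM) kToyOpcodeCount };
#undef DECLARE_ENUM

// Expansion 2: printable names, generated from the same list.
#define DECLARE_NAME(name) #name,
const char* const kToyOpcodeNames[] = {TOY_OPCODE_LIST(DECLARE_NAME)};
#undef DECLARE_NAME

int main() {
  std::printf("%s\n", kToyOpcodeNames[kToySub]);  // prints "ToySub"
}
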
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index c63e0aa3d3..f79e334ed6 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -375,9 +375,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64S128Load32Zero:
case kMips64S128Load64Zero:
case kMips64S128LoadLane:
- case kMips64Word64AtomicLoadUint8:
- case kMips64Word64AtomicLoadUint16:
- case kMips64Word64AtomicLoadUint32:
case kMips64Word64AtomicLoadUint64:
return kIsLoadOperation;
@@ -400,37 +397,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Uswc1:
case kMips64Sync:
case kMips64S128StoreLane:
- case kMips64Word64AtomicStoreWord8:
- case kMips64Word64AtomicStoreWord16:
- case kMips64Word64AtomicStoreWord32:
+ case kMips64StoreCompressTagged:
case kMips64Word64AtomicStoreWord64:
- case kMips64Word64AtomicAddUint8:
- case kMips64Word64AtomicAddUint16:
- case kMips64Word64AtomicAddUint32:
case kMips64Word64AtomicAddUint64:
- case kMips64Word64AtomicSubUint8:
- case kMips64Word64AtomicSubUint16:
- case kMips64Word64AtomicSubUint32:
case kMips64Word64AtomicSubUint64:
- case kMips64Word64AtomicAndUint8:
- case kMips64Word64AtomicAndUint16:
- case kMips64Word64AtomicAndUint32:
case kMips64Word64AtomicAndUint64:
- case kMips64Word64AtomicOrUint8:
- case kMips64Word64AtomicOrUint16:
- case kMips64Word64AtomicOrUint32:
case kMips64Word64AtomicOrUint64:
- case kMips64Word64AtomicXorUint8:
- case kMips64Word64AtomicXorUint16:
- case kMips64Word64AtomicXorUint32:
case kMips64Word64AtomicXorUint64:
- case kMips64Word64AtomicExchangeUint8:
- case kMips64Word64AtomicExchangeUint16:
- case kMips64Word64AtomicExchangeUint32:
case kMips64Word64AtomicExchangeUint64:
- case kMips64Word64AtomicCompareExchangeUint8:
- case kMips64Word64AtomicCompareExchangeUint16:
- case kMips64Word64AtomicCompareExchangeUint32:
case kMips64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
@@ -1352,8 +1326,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return DadduLatency(false) + AndLatency(false) + AssertLatency() +
DadduLatency(false) + AndLatency(false) + BranchShortLatency() +
1 + DsubuLatency() + DadduLatency();
- case kArchWordPoisonOnSpeculation:
- return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:
@@ -1740,35 +1712,35 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return ByteSwapSignedLatency();
case kMips64ByteSwap32:
return ByteSwapSignedLatency();
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return 2;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return 3;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
return Word32AtomicExchangeLatency(true, 8);
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
return Word32AtomicExchangeLatency(false, 8);
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
return Word32AtomicExchangeLatency(true, 16);
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
return Word32AtomicExchangeLatency(false, 16);
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
return Word32AtomicCompareExchangeLatency(true, 8);
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
return Word32AtomicCompareExchangeLatency(false, 8);
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
return Word32AtomicCompareExchangeLatency(true, 16);
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
return Word32AtomicCompareExchangeLatency(false, 16);
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) +
BranchShortLatency() + 1;
case kMips64AssertEqual:
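
The latency entries above (LlLatency + ScLatency + BranchShortLatency, etc.) model the load-linked/store-conditional retry loop the code generator emits for atomic exchange and compare-exchange. In portable C++ the same shape looks roughly like this, with std::atomic standing in for the raw ll/sc pair:

#include <atomic>
#include <cassert>

// Conceptual shape of an atomic exchange built from an LL/SC-style retry loop.
int AtomicExchange(std::atomic<int>& cell, int new_value) {
  int old_value = cell.load(std::memory_order_relaxed);      // ~ ll
  while (!cell.compare_exchange_weak(old_value, new_value,   // ~ sc + branch
                                     std::memory_order_seq_cst)) {
    // On failure compare_exchange_weak reloads old_value; retry until the
    // conditional store succeeds, mirroring the branch back to the ll.
  }
  return old_value;
}

int main() {
  std::atomic<int> cell{7};
  int prev = AtomicExchange(cell, 42);
  assert(prev == 7 && cell.load() == 42);
}
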
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index bec7bbefdc..192f82c9db 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -515,16 +515,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
EmitLoad(this, node, opcode);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -2041,10 +2035,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
bool IsNodeUnsigned(Node* n) {
NodeMatcher m(n);
- if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
- m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad()) {
LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
+ } else if (m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(n->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ return load_rep.IsUnsigned();
} else {
return m.IsUint32Div() || m.IsUint32LessThan() ||
m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
@@ -2144,12 +2141,43 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+
+ // The memory order is ignored.
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ InstructionCode code;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicLoadWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ code = kMips64Word64AtomicLoadUint64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(kTaggedSize, 8);
+ code = kMips64Word64AtomicLoadUint64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), g.UseRegister(base),
g.UseImmediate(index));
} else {
@@ -2157,35 +2185,93 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
}
}
void VisitAtomicStore(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
- g.UseRegisterOrImmediateZero(value));
+ // The memory order is ignored.
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
+ MachineRepresentation rep = store_params.representation();
+
+ if (FLAG_enable_unconditional_write_barriers &&
+ CanBeTaggedOrCompressedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
+ InstructionCode code;
+
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedPointer(rep));
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- InstructionOperand addr_reg = g.TempRegister();
- selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
- addr_reg, g.UseRegister(index), g.UseRegister(base));
- // Emit desired store opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.NoOutput(), addr_reg, g.TempImmediate(0),
- g.UseRegisterOrImmediateZero(value));
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ code = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ code = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ DCHECK_EQ(width, AtomicWidth::kWord64);
+ code = kMips64Word64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+ code = kMips64StoreCompressTagged;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ code |= AtomicWidthField::encode(width);
+
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), addr_reg, g.TempImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ }
}
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2203,12 +2289,13 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2228,12 +2315,13 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2252,7 +2340,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
temps[1] = g.TempRegister();
temps[2] = g.TempRegister();
temps[3] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
@@ -2615,163 +2704,93 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
- break;
- default:
- UNREACHABLE();
- }
-
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = kMips64Word64AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kMips64Word64AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kMips64Word64AtomicLoadUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kMips64Word64AtomicLoadUint64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kMips64Word64AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kMips64Word64AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kMips64Word64AtomicStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kMips64Word64AtomicStoreWord64;
- break;
- default:
- UNREACHABLE();
- }
-
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kMips64Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kMips64Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kMips64Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kMips64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kMips64Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kMips64Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kMips64Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kMips64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
@@ -2792,15 +2811,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2825,14 +2843,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
#define VISIT_ATOMIC_BINOP(op) \
void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kMips64Word64Atomic##op##Uint8, kMips64Word64Atomic##op##Uint16, \
- kMips64Word64Atomic##op##Uint32, kMips64Word64Atomic##op##Uint64); \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kMips64Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
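
The reworked VisitAtomicStore above splits stores into two paths: tagged stores that still need a write barrier go through kArchAtomicStoreWithWriteBarrier, everything else becomes a plain width-tagged atomic store opcode. A toy model of that selection logic, using opcode names from the hunk but illustrative enums (not V8's types):

#include <cstdio>

enum class Rep { kWord8, kWord16, kWord32, kWord64, kTagged };
enum class Barrier { kNone, kFull };

const char* SelectAtomicStore(Rep rep, Barrier barrier, bool disable_barriers) {
  if (barrier != Barrier::kNone && !disable_barriers) {
    // Barrier path: unique-register inputs plus temps, record-write mode encoded.
    return "kArchAtomicStoreWithWriteBarrier";
  }
  switch (rep) {
    case Rep::kWord8:  return "kAtomicStoreWord8";
    case Rep::kWord16: return "kAtomicStoreWord16";
    case Rep::kWord32: return "kAtomicStoreWord32";
    case Rep::kWord64: return "kMips64Word64AtomicStoreWord64";
    case Rep::kTagged: return "kMips64StoreCompressTagged";
  }
  return "unreachable";
}

int main() {
  std::printf("%s\n", SelectAtomicStore(Rep::kTagged, Barrier::kFull, false));
  std::printf("%s\n", SelectAtomicStore(Rep::kWord32, Barrier::kNone, false));
}
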
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index cf324353f2..0bf29ba686 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -38,9 +38,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
RCBit OutputRCBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
- case kFlags_branch_and_poison:
case kFlags_deoptimize:
- case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
case kFlags_select:
@@ -289,15 +287,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
- PPCOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode());
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->and_(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round) \
@@ -777,25 +766,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, cr0);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- Register scratch = kScratchReg;
-
- __ ComputeCodeStartAddress(scratch);
-
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ CmpS64(kJavaScriptCallCodeStartRegister, scratch);
- __ li(scratch, Operand::Zero());
- __ notx(kSpeculationPoisonRegister, scratch);
- __ isel(eq, kSpeculationPoisonRegister, kSpeculationPoisonRegister, scratch);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ and_(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1164,10 +1134,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()), r0);
break;
}
- case kArchWordPoisonOnSpeculation:
- __ and_(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kPPC_Peek: {
int reverse_slot = i.InputInt32(0);
int offset =
@@ -1953,10 +1919,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_BitcastFloat32ToInt32:
- __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0),
+ kScratchDoubleReg);
break;
case kPPC_BitcastInt32ToFloat32:
- __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
+ __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0), ip);
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_BitcastDoubleToInt64:
@@ -1968,33 +1935,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kPPC_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(lha, lhax);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(lwa, lwax);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_LoadWord64:
ASSEMBLE_LOAD_INTEGER(ld, ldx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
#endif
case kPPC_LoadFloat32:
@@ -2051,25 +2011,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
}
- case kWord32AtomicLoadInt8:
- case kPPC_AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kPPC_AtomicLoadUint16:
- case kPPC_AtomicLoadWord32:
- case kPPC_AtomicLoadWord64:
- case kPPC_AtomicStoreUint8:
- case kPPC_AtomicStoreUint16:
- case kPPC_AtomicStoreWord32:
- case kPPC_AtomicStoreWord64:
+ case kAtomicLoadInt8:
+ case kAtomicLoadInt16:
UNREACHABLE();
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
__ extsb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kPPC_AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
__ extsh(i.OutputRegister(0), i.OutputRegister(0));
break;
@@ -2082,13 +2034,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_AtomicExchangeWord64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldarx, stdcx);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(CmpS64, lbarx, stbcx, extsb);
break;
case kPPC_AtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(CmpS64, lbarx, stbcx, ZeroExtByte);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(CmpS64, lharx, sthcx, extsh);
break;
case kPPC_AtomicCompareExchangeUint16:
@@ -2135,6 +2087,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register input = i.InputRegister(0);
Register output = i.OutputRegister();
Register temp1 = r0;
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ brw(output, input);
+ break;
+ }
__ rotlwi(temp1, input, 8);
__ rlwimi(temp1, input, 24, 0, 7);
__ rlwimi(temp1, input, 24, 16, 23);
@@ -2143,7 +2099,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_LoadByteRev32: {
ASSEMBLE_LOAD_INTEGER_RR(lwbrx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kPPC_StoreByteRev32: {
@@ -2156,6 +2111,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register temp1 = r0;
Register temp2 = kScratchReg;
Register temp3 = i.TempRegister(0);
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ brd(output, input);
+ break;
+ }
__ rldicl(temp1, input, 32, 32);
__ rotlwi(temp2, input, 8);
__ rlwimi(temp2, input, 24, 0, 7);
@@ -2169,7 +2128,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_LoadByteRev64: {
ASSEMBLE_LOAD_INTEGER_RR(ldbrx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kPPC_StoreByteRev64: {
@@ -2186,7 +2144,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_F32x4Splat: {
Simd128Register dst = i.OutputSimd128Register();
- __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(0));
+ __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(0),
+ kScratchDoubleReg);
__ mtvsrd(dst, kScratchReg);
__ vspltw(dst, dst, Operand(1));
break;
@@ -2229,7 +2188,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vextractuw(kScratchSimd128Reg, i.InputSimd128Register(0),
Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
__ mfvsrd(kScratchReg, kScratchSimd128Reg);
- __ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg);
+ __ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg, ip);
break;
}
case kPPC_I64x2ExtractLane: {
@@ -2292,7 +2251,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 4;
Simd128Register dst = i.OutputSimd128Register();
- __ MovFloatToInt(r0, i.InputDoubleRegister(2));
+ __ MovFloatToInt(r0, i.InputDoubleRegister(2), kScratchDoubleReg);
if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
__ vinsw(dst, r0, Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
} else {
@@ -3522,7 +3481,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemOperand operand = i.MemoryOperand(&mode, &index);
DCHECK_EQ(mode, kMode_MRR);
__ vextractub(kScratchSimd128Reg, i.InputSimd128Register(0),
- Operand(15 - i.InputInt8(3)));
+ Operand(15 - i.InputUint8(3)));
__ stxsibx(kScratchSimd128Reg, operand);
break;
}
@@ -3799,21 +3758,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
- condition == kOverflow || condition == kNotOverflow) {
- return;
- }
-
- ArchOpcode op = instr->arch_opcode();
- condition = NegateFlagsCondition(condition);
- __ li(kScratchReg, Operand::Zero());
- __ isel(FlagsConditionToCondition(condition, op), kSpeculationPoisonRegister,
- kScratchReg, kSpeculationPoisonRegister, cr0);
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3940,7 +3884,6 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
break;
default:
UNREACHABLE();
- break;
}
} else {
if (reg_value != 0) __ li(reg, Operand::Zero());
@@ -4079,7 +4022,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
@@ -4353,7 +4295,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC.
- break;
}
if (destination->IsStackSlot()) {
__ StoreU64(dst, g.ToMemOperand(destination), r0);
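
Besides dropping the speculation-poison machinery, the PPC code generator above gains a Power10 fast path for byte reversal: emit a single brw/brd when CpuFeatures::IsSupported(PPC_10_PLUS), otherwise fall back to the rotate/insert sequence. A sketch of that feature-gated pattern; HasPower10() is a hypothetical stand-in for the CPU-feature check, and __builtin_bswap32 (a GCC/Clang builtin) stands in for the single-instruction path:

#include <cassert>
#include <cstdint>

bool HasPower10() { return false; }  // assume the fallback path for this demo

uint32_t ByteRev32(uint32_t x) {
  if (HasPower10()) {
    // Fast path: one instruction, analogous to emitting `brw`.
    return __builtin_bswap32(x);
  }
  // Portable equivalent of the rotlwi/rlwimi fallback sequence.
  return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
         ((x & 0x00FF0000u) >> 8)  | ((x & 0xFF000000u) >> 24);
}

int main() { assert(ByteRev32(0x11223344u) == 0x44332211u); }
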
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 64f532a52b..4182e8b71b 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -138,17 +138,6 @@ namespace compiler {
V(PPC_StoreSimd128) \
V(PPC_ByteRev32) \
V(PPC_ByteRev64) \
- V(PPC_CompressSigned) \
- V(PPC_CompressPointer) \
- V(PPC_CompressAny) \
- V(PPC_AtomicStoreUint8) \
- V(PPC_AtomicStoreUint16) \
- V(PPC_AtomicStoreWord32) \
- V(PPC_AtomicStoreWord64) \
- V(PPC_AtomicLoadUint8) \
- V(PPC_AtomicLoadUint16) \
- V(PPC_AtomicLoadWord32) \
- V(PPC_AtomicLoadWord64) \
V(PPC_AtomicExchangeUint8) \
V(PPC_AtomicExchangeUint16) \
V(PPC_AtomicExchangeWord32) \
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index aeb1377879..0270dc401e 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -112,9 +112,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_BitcastDoubleToInt64:
case kPPC_ByteRev32:
case kPPC_ByteRev64:
- case kPPC_CompressSigned:
- case kPPC_CompressPointer:
- case kPPC_CompressAny:
case kPPC_F64x2Splat:
case kPPC_F64x2ExtractLane:
case kPPC_F64x2ReplaceLane:
@@ -332,10 +329,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_LoadFloat32:
case kPPC_LoadDouble:
case kPPC_LoadSimd128:
- case kPPC_AtomicLoadUint8:
- case kPPC_AtomicLoadUint16:
- case kPPC_AtomicLoadWord32:
- case kPPC_AtomicLoadWord64:
case kPPC_Peek:
case kPPC_LoadDecompressTaggedSigned:
case kPPC_LoadDecompressTaggedPointer:
@@ -378,10 +371,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_S128Store64Lane:
return kHasSideEffect;
- case kPPC_AtomicStoreUint8:
- case kPPC_AtomicStoreUint16:
- case kPPC_AtomicStoreWord32:
- case kPPC_AtomicStoreWord64:
case kPPC_AtomicExchangeUint8:
case kPPC_AtomicExchangeUint16:
case kPPC_AtomicExchangeWord32:
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index c74211aa38..bfa7c0a6e0 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -167,9 +167,9 @@ void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
}
-void InstructionSelector::VisitLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- PPCOperandGenerator g(this);
+static void VisitLoadCommon(InstructionSelector* selector, Node* node,
+ LoadRepresentation load_rep) {
+ PPCOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
InstructionCode opcode = kArchNop;
@@ -229,54 +229,51 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad &&
- poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
-
bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad ||
node->opcode() == IrOpcode::kWord64AtomicLoad);
if (g.CanBeImmediate(offset, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseImmediate(offset), g.UseImmediate(is_atomic));
} else if (g.CanBeImmediate(base, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(offset),
+ g.UseImmediate(base), g.UseImmediate(is_atomic));
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseRegister(offset), g.UseImmediate(is_atomic));
}
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ VisitLoadCommon(this, node, load_rep);
+}
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
-void InstructionSelector::VisitStore(Node* node) {
- PPCOperandGenerator g(this);
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ PPCOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
Node* value = node->InputAt(2);
+ // TODO(miladfarca): maybe use atomic_order?
bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicStore ||
node->opcode() == IrOpcode::kWord64AtomicStore);
- MachineRepresentation rep;
+ MachineRepresentation rep = store_rep.representation();
WriteBarrierKind write_barrier_kind = kNoWriteBarrier;
- if (is_atomic) {
- rep = AtomicStoreRepresentationOf(node->op());
- } else {
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ if (!is_atomic) {
write_barrier_kind = store_rep.write_barrier_kind();
- rep = store_rep.representation();
}
if (FLAG_enable_unconditional_write_barriers &&
@@ -312,7 +309,7 @@ void InstructionSelector::VisitStore(Node* node) {
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
CHECK_EQ(is_atomic, false);
- Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
ArchOpcode opcode;
ImmediateMode mode = kInt16Imm;
@@ -346,7 +343,6 @@ void InstructionSelector::VisitStore(Node* node) {
break;
#else
UNREACHABLE();
- break;
#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
@@ -374,21 +370,26 @@ void InstructionSelector::VisitStore(Node* node) {
}
if (g.CanBeImmediate(offset, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(offset),
+ g.UseRegister(value), g.UseImmediate(is_atomic));
} else if (g.CanBeImmediate(base, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), g.UseRegister(offset), g.UseImmediate(base),
+ g.UseRegister(value), g.UseImmediate(is_atomic));
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.NoOutput(), g.UseRegister(base), g.UseRegister(offset),
+ g.UseRegister(value), g.UseImmediate(is_atomic));
}
}
}
+void InstructionSelector::VisitStore(Node* node) {
+ VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
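// Editor's note (not part of the patch): VisitStoreCommon is also reached from
// VisitWord32AtomicStore/VisitWord64AtomicStore further down, which pass the
// representation and memory order taken from AtomicStoreParameters; the plain
// VisitStore wrapper above passes base::nullopt, and the order argument is
// currently unused (see the TODO inside VisitStoreCommon).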
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1956,16 +1957,28 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
Emit(kPPC_Sync, g.NoOutput());
}
-void InstructionSelector::VisitWord32AtomicLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoadCommon(this, node, load_rep);
+}
-void InstructionSelector::VisitWord64AtomicLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoadCommon(this, node, load_rep);
+}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- VisitStore(node);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- VisitStore(node);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
@@ -1991,11 +2004,11 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
opcode = kPPC_AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
opcode = kPPC_AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
@@ -2052,11 +2065,11 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
opcode = kPPC_AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
opcode = kPPC_AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
diff --git a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
index 2d92ae1567..559378b19b 100644
--- a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
@@ -106,7 +106,6 @@ class RiscvOperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates
- break;
}
UNREACHABLE();
}
@@ -307,17 +306,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- RiscvOperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -336,7 +324,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
do { \
Label binop; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ sync(); \
__ bind(&binop); \
__ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
@@ -351,7 +339,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
size, bin_instr, representation) \
do { \
Label binop; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
if (representation == 32) { \
__ And(i.TempRegister(3), i.TempRegister(0), 0x3); \
} else { \
@@ -380,7 +368,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
Label exchange; \
__ sync(); \
__ bind(&exchange); \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
__ Move(i.TempRegister(1), i.InputRegister(2)); \
__ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
@@ -392,7 +380,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
load_linked, store_conditional, sign_extend, size, representation) \
do { \
Label exchange; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
if (representation == 32) { \
__ And(i.TempRegister(1), i.TempRegister(0), 0x3); \
} else { \
@@ -419,7 +407,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
do { \
Label compareExchange; \
Label exit; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ sync(); \
__ bind(&compareExchange); \
__ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
@@ -438,7 +426,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
do { \
Label compareExchange; \
Label exit; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
if (representation == 32) { \
__ And(i.TempRegister(1), i.TempRegister(0), 0x3); \
} else { \
@@ -570,31 +558,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- // difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerSystemPointer - 1))
- __ ComputeCodeStartAddress(kScratchReg);
- __ Move(kSpeculationPoisonRegister, kScratchReg);
- __ Sub32(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ Sub32(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
- kScratchReg);
- __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ Sra64(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerSystemPointer - 1);
- __ Nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kSpeculationPoisonRegister);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(sp, sp, kSpeculationPoisonRegister);
-}
-
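// Editor's sketch (not part of the patch): a scalar model of the mask that the
// removed GenerateSpeculationPoisonFromCodeStartRegister computed, following
// the formula in its deleted comment. The helper name below is illustrative.
#include <cstdint>

static inline uint64_t SpeculationPoisonMask(uint64_t current_pc,
                                             uint64_t expected_pc) {
  // Zero only when the code start we are executing matches expectations.
  uint64_t difference =
      (current_pc - expected_pc) | (expected_pc - current_pc);
  // The sign bit of `difference` is set whenever it is non-zero, so an
  // arithmetic shift by 63 yields all-ones on a mispredicted path and zero on
  // the expected path; negating that gives the poison mask: all-ones (keep
  // values) when execution is where it should be, all-zeros otherwise.
  return ~static_cast<uint64_t>(static_cast<int64_t>(difference) >> 63);
}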
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -887,10 +850,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@@ -1094,17 +1053,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kRiscvPopcnt32: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- __ Popcnt32(dst, src);
+ __ Popcnt32(dst, src, kScratchReg);
} break;
case kRiscvPopcnt64: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- __ Popcnt64(dst, src);
+ __ Popcnt64(dst, src, kScratchReg);
} break;
case kRiscvShl32:
if (instr->InputAt(1)->IsRegister()) {
- __ Sll32(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
+ __ Sll32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
__ Sll32(i.OutputRegister(), i.InputRegister(0),
@@ -1113,8 +1071,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvShr32:
if (instr->InputAt(1)->IsRegister()) {
- __ Srl32(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
+ __ Srl32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
__ Srl32(i.OutputRegister(), i.InputRegister(0),
@@ -1123,8 +1080,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvSar32:
if (instr->InputAt(1)->IsRegister()) {
- __ Sra32(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
+ __ Sra32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
__ Sra32(i.OutputRegister(), i.InputRegister(0),
@@ -1553,30 +1509,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvLbu:
__ Lbu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLb:
__ Lb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSb:
__ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kRiscvLhu:
__ Lhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLh:
__ Lh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSh:
__ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1586,27 +1536,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvLw:
__ Lw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLwu:
__ Lwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLd:
__ Ld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSw:
__ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1625,7 +1569,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kRiscvULoadFloat: {
- __ ULoadFloat(i.OutputSingleRegister(), i.MemoryOperand());
+ __ ULoadFloat(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
break;
}
case kRiscvStoreFloat: {
@@ -1645,14 +1589,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ft == kDoubleRegZero && !__ IsSingleZeroRegSet()) {
__ LoadFPRImmediate(kDoubleRegZero, 0.0f);
}
- __ UStoreFloat(ft, operand);
+ __ UStoreFloat(ft, operand, kScratchReg);
break;
}
case kRiscvLoadDouble:
__ LoadDouble(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kRiscvULoadDouble:
- __ ULoadDouble(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ ULoadDouble(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
break;
case kRiscvStoreDouble: {
FPURegister ft = i.InputOrZeroDoubleRegister(2);
@@ -1667,7 +1611,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ LoadFPRImmediate(kDoubleRegZero, 0.0);
}
- __ UStoreDouble(ft, i.MemoryOperand());
+ __ UStoreDouble(ft, i.MemoryOperand(), kScratchReg);
break;
}
case kRiscvSync: {
@@ -1723,156 +1667,175 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kRiscvByteSwap64: {
- __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 8);
+ __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 8, kScratchReg);
break;
}
case kRiscvByteSwap32: {
- __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 4);
+ __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 4, kScratchReg);
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
break;
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
break;
- case kRiscvWord64AtomicLoadUint8:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
- break;
- case kRiscvWord64AtomicLoadUint16:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
- break;
- case kRiscvWord64AtomicLoadUint32:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
- break;
case kRiscvWord64AtomicLoadUint64:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
break;
- case kWord32AtomicStoreWord8:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
- break;
- case kWord32AtomicStoreWord16:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
- break;
- case kWord32AtomicStoreWord32:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
- break;
- case kRiscvWord64AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
break;
- case kRiscvWord64AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
break;
- case kRiscvWord64AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
break;
case kRiscvWord64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kRiscvWord64AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kRiscvWord64AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kRiscvWord64AtomicExchangeUint32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kRiscvWord64AtomicExchangeUint64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicCompareExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicCompareExchangeWord32:
- __ Sll32(i.InputRegister(2), i.InputRegister(2), 0);
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kRiscvWord64AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kRiscvWord64AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicCompareExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kRiscvWord64AtomicCompareExchangeUint32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicCompareExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Sll32(i.InputRegister(2), i.InputRegister(2), 0);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kRiscvWord64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
- break;
- ATOMIC_BINOP_CASE(Add, Add32)
- ATOMIC_BINOP_CASE(Sub, Sub32)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
-#undef ATOMIC_BINOP_CASE
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kRiscvWord64Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst, 64); \
- break; \
- case kRiscvWord64Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst, 64); \
- break; \
- case kRiscvWord64Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst, 64); \
- break; \
- case kRiscvWord64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Word32: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kRiscvWord64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
break;
- ATOMIC_BINOP_CASE(Add, Add64)
- ATOMIC_BINOP_CASE(Sub, Sub64)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
+ ATOMIC_BINOP_CASE(Add, Add32, Add64)
+ ATOMIC_BINOP_CASE(Sub, Sub32, Sub64)
+ ATOMIC_BINOP_CASE(And, And, And)
+ ATOMIC_BINOP_CASE(Or, Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor, Xor)
#undef ATOMIC_BINOP_CASE
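    // Editor's note (not part of the patch): the ASSEMBLE_ATOMIC_BINOP*
    // sequences used above expand to a load-linked/store-conditional retry
    // loop; a rough C++ analogue of a 32-bit atomic add (assuming <atomic>
    // and <cstdint>) would be:
    //
    //   uint32_t AtomicAdd32(std::atomic<uint32_t>* cell, uint32_t v) {
    //     uint32_t old = cell->load();
    //     // Retry until no other thread wrote the cell between the load
    //     // (lr.w) and the store (sc.w).
    //     while (!cell->compare_exchange_weak(old, old + v)) {
    //     }
    //     return old;  // Atomic RMW operations yield the previous value.
    //   }
    //
    // The new AtomicWidthField dispatch only selects between the 32-bit
    // (Ll/Sc) and 64-bit (Lld/Scd) flavours of that loop.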
case kRiscvAssertEqual:
__ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
@@ -1905,7 +1868,543 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ DecompressAnyTagged(result, operand);
break;
}
+ case kRiscvRvvSt: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ Register dst = i.MemoryOperand().offset() == 0 ? i.MemoryOperand().rm()
+ : kScratchReg;
+ if (i.MemoryOperand().offset() != 0) {
+ __ Add64(dst, i.MemoryOperand().rm(), i.MemoryOperand().offset());
+ }
+ __ vs(i.InputSimd128Register(2), dst, 0, VSew::E8);
+ break;
+ }
+ case kRiscvRvvLd: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ Register src = i.MemoryOperand().offset() == 0 ? i.MemoryOperand().rm()
+ : kScratchReg;
+ if (i.MemoryOperand().offset() != 0) {
+ __ Add64(src, i.MemoryOperand().rm(), i.MemoryOperand().offset());
+ }
+ __ vl(i.OutputSimd128Register(), src, 0, VSew::E8);
+ break;
+ }
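    // Editor's note (not part of the patch): the two RVV cases above fold a
    // non-zero MemOperand offset into kScratchReg with Add64 because the
    // unit-stride vector loads/stores only accept a bare base register,
    // unlike the scalar loads/stores, which take base plus immediate offset.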
+ case kRiscvS128Const: {
+ Simd128Register dst = i.OutputSimd128Register();
+ uint8_t imm[16];
+ *reinterpret_cast<uint64_t*>(imm) =
+ make_uint64(i.InputUint32(1), i.InputUint32(0));
+ *(reinterpret_cast<uint64_t*>(imm) + 1) =
+ make_uint64(i.InputUint32(3), i.InputUint32(2));
+ __ WasmRvvS128const(dst, imm);
+ break;
+ }
+ case kRiscvI64x2Add: {
+ (__ VU).set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI32x4Add: {
+ (__ VU).set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8Add: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8AddSatS: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8AddSatU: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vsaddu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16Add: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16AddSatS: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16AddSatU: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vsaddu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI64x2Sub: {
+ (__ VU).set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI32x4Sub: {
+ (__ VU).set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8Sub: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8SubSatS: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8SubSatU: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vssubu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16Sub: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16SubSatS: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16SubSatU: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vssubu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128And: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Or: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Xor: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vxor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Not: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvS128AndNot: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvI32x4ExtractLane: {
+ __ WasmRvvExtractLane(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1), E32, m1);
+ break;
+ }
+ case kRiscvI8x16Splat: {
+ (__ VU).set(kScratchReg, E8, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI16x8Splat: {
+ (__ VU).set(kScratchReg, E16, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI32x4Splat: {
+ (__ VU).set(kScratchReg, E32, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI64x2Splat: {
+ (__ VU).set(kScratchReg, E64, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI32x4Abs: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ vmslt_vv(v0, i.InputSimd128Register(0), kSimd128RegZero);
+ __ vsub_vv(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0), Mask);
+ break;
+ }
+ case kRiscvI8x16Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16GeU: {
+ __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GeU: {
+ __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GeU: {
+ __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI8x16GtS: {
+ __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GtS: {
+ __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+    case kRiscvI32x4GtS: {
+      __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                    i.InputSimd128Register(1), E32, m1);
+      break;
+    }
+    case kRiscvI64x2GtS: {
+      __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                    i.InputSimd128Register(1), E64, m1);
+      break;
+    }
+ case kRiscvI8x16GtU: {
+ __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GtU: {
+ __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GtU: {
+ __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI8x16Shl: {
+ __ VU.set(kScratchReg, E8, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ }
+ break;
+ }
+ case kRiscvI16x8Shl: {
+ __ VU.set(kScratchReg, E16, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ }
+ break;
+ }
+ case kRiscvI32x4Shl: {
+ __ VU.set(kScratchReg, E32, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ }
+ break;
+ }
+ case kRiscvI64x2Shl: {
+ __ VU.set(kScratchReg, E64, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ if (is_int5(i.InputInt6(1))) {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt6(1));
+ } else {
+ __ li(kScratchReg, i.InputInt6(1));
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg);
+ }
+ }
+ break;
+ }
+ case kRiscvI8x16ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E32, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+      __ vmv_sx(v0, kScratchReg);
+      // Switch to byte elements so the merge replaces a single i8 lane.
+      __ VU.set(kScratchReg, E8, m1);
+      __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI16x8ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E16, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI64x2ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E64, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI32x4ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E32, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI8x16BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E8, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI16x8BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E16, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI32x4BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI64x2BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvV128AnyTrue: {
+ __ VU.set(kScratchReg, E8, m1);
+ Register dst = i.OutputRegister();
+ Label t;
+ __ vmv_sx(kSimd128ScratchReg, zero_reg);
+ __ vredmaxu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beq(dst, zero_reg, &t);
+ __ li(dst, 1);
+ __ bind(&t);
+ break;
+ }
+ case kRiscvI64x2AllTrue: {
+ __ VU.set(kScratchReg, E64, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
+ case kRiscvI32x4AllTrue: {
+ __ VU.set(kScratchReg, E32, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
+ case kRiscvI16x8AllTrue: {
+ __ VU.set(kScratchReg, E16, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
+ case kRiscvI8x16AllTrue: {
+ __ VU.set(kScratchReg, E8, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
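    // Editor's note (not part of the patch): the AnyTrue/AllTrue cases above
    // use unsigned reductions: vredmaxu over the lanes is non-zero iff some
    // lane is non-zero (any-true), while vredminu seeded with an all-ones
    // accumulator is non-zero iff every lane is non-zero (all-true); the
    // trailing beq/li pair canonicalizes the reduction result to 0 or 1.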
+ case kRiscvI8x16Shuffle: {
+ VRegister dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+
+ int64_t imm1 = make_uint64(i.InputInt32(3), i.InputInt32(2));
+ int64_t imm2 = make_uint64(i.InputInt32(5), i.InputInt32(4));
+ __ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ li(kScratchReg, 1);
+ __ vmv_vx(v0, kScratchReg);
+ __ li(kScratchReg, imm1);
+ __ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+ __ li(kScratchReg, imm2);
+ __ vsll_vi(v0, v0, 1);
+ __ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+
+ __ VU.set(kScratchReg, E8, m1);
+ if (dst == src0) {
+ __ vmv_vv(kSimd128ScratchReg2, src0);
+ src0 = kSimd128ScratchReg2;
+ } else if (dst == src1) {
+ __ vmv_vv(kSimd128ScratchReg2, src1);
+ src1 = kSimd128ScratchReg2;
+ }
+ __ vrgather_vv(dst, src0, kSimd128ScratchReg);
+ __ vadd_vi(kSimd128ScratchReg, kSimd128ScratchReg, -16);
+ __ vrgather_vv(kSimd128ScratchReg, src1, kSimd128ScratchReg);
+ __ vor_vv(dst, dst, kSimd128ScratchReg);
+ break;
+ }
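    // Editor's note (not part of the patch): the shuffle above treats the two
    // inputs as one 32-byte table. The first vrgather_vv picks lanes whose
    // index is 0..15 from src0, vadd_vi(..., -16) rebases the indices so the
    // second vrgather_vv picks lanes 16..31 from src1 (out-of-range indices
    // gather zero), and vor_vv merges the halves. Scalar semantics, roughly:
    //   dst[j] = idx[j] < 16 ? src0[idx[j]] : src1[idx[j] - 16];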
default:
+#ifdef DEBUG
+ switch (arch_opcode) {
+#define Print(name) \
+ case k##name: \
+ printf("k%s", #name); \
+ break;
+ TARGET_ARCH_OPCODE_LIST(Print);
+#undef Print
+ default:
+ break;
+ }
+#endif
UNIMPLEMENTED();
}
return kSuccess;
@@ -1916,6 +2415,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
<< "\""; \
UNIMPLEMENTED();
+bool IsIncludeEqual(Condition cc) {
+ switch (cc) {
+ case equal:
+ case greater_equal:
+ case less_equal:
+ case Uless_equal:
+ case Ugreater_equal:
+ return true;
+ default:
+ return false;
+ }
+}
+
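// Editor's note (not part of the patch): IsIncludeEqual is used in the
// kRiscvCmpZero branch below. When the left-hand side is the hardwired zero
// register, a comparison against zero is decided statically: conditions that
// admit equality fold to an unconditional branch, and the remaining
// conditions can never hold, so no branch is emitted at all.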
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
@@ -1952,7 +2464,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
- break;
}
} else if (instr->arch_opcode() == kRiscvMulOvf32) {
// Overflow occurs if overflow register is not zero
@@ -1965,14 +2476,17 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(kRiscvMulOvf32, condition);
- break;
}
} else if (instr->arch_opcode() == kRiscvCmp) {
cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kRiscvCmpZero) {
cc = FlagsConditionToConditionCmp(condition);
- __ Branch(tlabel, cc, i.InputRegister(0), Operand(zero_reg));
+    if (i.InputOrZeroRegister(0) == zero_reg && IsIncludeEqual(cc)) {
+ __ Branch(tlabel);
+ } else if (i.InputOrZeroRegister(0) != zero_reg) {
+ __ Branch(tlabel, cc, i.InputRegister(0), Operand(zero_reg));
+ }
} else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
cc = FlagsConditionToConditionCmp(condition);
Register lhs_register = sp;
@@ -2011,110 +2525,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- RiscvOperandConverter i(this, instr);
- condition = NegateFlagsCondition(condition);
-
- switch (instr->arch_opcode()) {
- case kRiscvCmp: {
- __ CompareI(kScratchReg, i.InputRegister(0), i.InputOperand(1),
- FlagsConditionToConditionCmp(condition));
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
- }
- return;
- case kRiscvCmpZero: {
- __ CompareI(kScratchReg, i.InputRegister(0), Operand(zero_reg),
- FlagsConditionToConditionCmp(condition));
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
- }
- return;
- case kRiscvTst: {
- switch (condition) {
- case kEqual:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- case kNotEqual:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- default:
- UNREACHABLE();
- }
- }
- return;
- case kRiscvAdd64:
- case kRiscvSub64: {
- // Check for overflow creates 1 or 0 for result.
- __ Srl64(kScratchReg, i.OutputRegister(), 63);
- __ Srl32(kScratchReg2, i.OutputRegister(), 31);
- __ Xor(kScratchReg2, kScratchReg, kScratchReg2);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kRiscvAddOvf64:
- case kRiscvSubOvf64: {
- // Overflow occurs if overflow register is negative
- __ Slt(kScratchReg2, kScratchReg, zero_reg);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kRiscvMulOvf32: {
- // Overflow occurs if overflow register is not zero
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kRiscvCmpS:
- case kRiscvCmpD: {
- bool predicate;
- FlagsConditionToConditionCmpFPU(&predicate, condition);
- if (predicate) {
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
- } else {
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- }
- }
- return;
- default:
- UNREACHABLE();
- }
-}
-
#undef UNSUPPORTED_COND
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@@ -2489,7 +2899,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -2735,7 +3144,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers
- break;
}
if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
@@ -2765,7 +3173,21 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (source->IsFPRegister()) {
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) {
- UNIMPLEMENTED();
+ VRegister src = g.ToSimd128Register(source);
+ if (destination->IsSimd128Register()) {
+ VRegister dst = g.ToSimd128Register(destination);
+ __ vmv_vv(dst, src);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ Register dst = g.ToMemOperand(destination).offset() == 0
+ ? g.ToMemOperand(destination).rm()
+ : kScratchReg;
+ if (g.ToMemOperand(destination).offset() != 0) {
+ __ Add64(dst, g.ToMemOperand(destination).rm(),
+ g.ToMemOperand(destination).offset());
+ }
+ __ vs(src, dst, 0, E8);
+ }
} else {
FPURegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
@@ -2786,7 +3208,25 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
MemOperand src = g.ToMemOperand(source);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) {
- UNIMPLEMENTED();
+ Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
+ if (src.offset() != 0) {
+ __ Add64(src_reg, src.rm(), src.offset());
+ }
+ if (destination->IsSimd128Register()) {
+ __ vl(g.ToSimd128Register(destination), src_reg, 0, E8);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ VRegister temp = kSimd128ScratchReg;
+ Register dst = g.ToMemOperand(destination).offset() == 0
+ ? g.ToMemOperand(destination).rm()
+ : kScratchReg;
+ if (g.ToMemOperand(destination).offset() != 0) {
+ __ Add64(dst, g.ToMemOperand(destination).rm(),
+ g.ToMemOperand(destination).offset());
+ }
+ __ vl(temp, src_reg, 0, E8);
+ __ vs(temp, dst, 0, E8);
+ }
} else {
if (destination->IsFPRegister()) {
if (rep == MachineRepresentation::kFloat32) {
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
index 2f51c2b1c7..0c8d99a8e8 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
@@ -355,7 +355,7 @@ namespace compiler {
V(RiscvS8x16PackOdd) \
V(RiscvS8x16InterleaveEven) \
V(RiscvS8x16InterleaveOdd) \
- V(RiscvS8x16Shuffle) \
+ V(RiscvI8x16Shuffle) \
V(RiscvI8x16Swizzle) \
V(RiscvS8x16Concat) \
V(RiscvS8x8Reverse) \
@@ -373,8 +373,8 @@ namespace compiler {
V(RiscvS128Load32x2U) \
V(RiscvS128LoadLane) \
V(RiscvS128StoreLane) \
- V(RiscvMsaLd) \
- V(RiscvMsaSt) \
+ V(RiscvRvvLd) \
+ V(RiscvRvvSt) \
V(RiscvI32x4SConvertI16x8Low) \
V(RiscvI32x4SConvertI16x8High) \
V(RiscvI32x4UConvertI16x8Low) \
@@ -387,41 +387,14 @@ namespace compiler {
V(RiscvI16x8UConvertI8x16High) \
V(RiscvI8x16SConvertI16x8) \
V(RiscvI8x16UConvertI16x8) \
- V(RiscvWord64AtomicLoadUint8) \
- V(RiscvWord64AtomicLoadUint16) \
- V(RiscvWord64AtomicLoadUint32) \
V(RiscvWord64AtomicLoadUint64) \
- V(RiscvWord64AtomicStoreWord8) \
- V(RiscvWord64AtomicStoreWord16) \
- V(RiscvWord64AtomicStoreWord32) \
V(RiscvWord64AtomicStoreWord64) \
- V(RiscvWord64AtomicAddUint8) \
- V(RiscvWord64AtomicAddUint16) \
- V(RiscvWord64AtomicAddUint32) \
V(RiscvWord64AtomicAddUint64) \
- V(RiscvWord64AtomicSubUint8) \
- V(RiscvWord64AtomicSubUint16) \
- V(RiscvWord64AtomicSubUint32) \
V(RiscvWord64AtomicSubUint64) \
- V(RiscvWord64AtomicAndUint8) \
- V(RiscvWord64AtomicAndUint16) \
- V(RiscvWord64AtomicAndUint32) \
V(RiscvWord64AtomicAndUint64) \
- V(RiscvWord64AtomicOrUint8) \
- V(RiscvWord64AtomicOrUint16) \
- V(RiscvWord64AtomicOrUint32) \
V(RiscvWord64AtomicOrUint64) \
- V(RiscvWord64AtomicXorUint8) \
- V(RiscvWord64AtomicXorUint16) \
- V(RiscvWord64AtomicXorUint32) \
V(RiscvWord64AtomicXorUint64) \
- V(RiscvWord64AtomicExchangeUint8) \
- V(RiscvWord64AtomicExchangeUint16) \
- V(RiscvWord64AtomicExchangeUint32) \
V(RiscvWord64AtomicExchangeUint64) \
- V(RiscvWord64AtomicCompareExchangeUint8) \
- V(RiscvWord64AtomicCompareExchangeUint16) \
- V(RiscvWord64AtomicCompareExchangeUint32) \
V(RiscvWord64AtomicCompareExchangeUint64) \
V(RiscvStoreCompressTagged) \
V(RiscvLoadDecompressTaggedSigned) \
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
index 157b11c930..471628b1f8 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
@@ -318,7 +318,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvS8x2Reverse:
case kRiscvS8x4Reverse:
case kRiscvS8x8Reverse:
- case kRiscvS8x16Shuffle:
+ case kRiscvI8x16Shuffle:
case kRiscvI8x16Swizzle:
case kRiscvSar32:
case kRiscvSignExtendByte:
@@ -352,7 +352,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvLw:
case kRiscvLoadFloat:
case kRiscvLwu:
- case kRiscvMsaLd:
+ case kRiscvRvvLd:
case kRiscvPeek:
case kRiscvUld:
case kRiscvULoadDouble:
@@ -372,9 +372,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvS128Load32x2S:
case kRiscvS128Load32x2U:
case kRiscvS128LoadLane:
- case kRiscvWord64AtomicLoadUint8:
- case kRiscvWord64AtomicLoadUint16:
- case kRiscvWord64AtomicLoadUint32:
case kRiscvWord64AtomicLoadUint64:
case kRiscvLoadDecompressTaggedSigned:
case kRiscvLoadDecompressTaggedPointer:
@@ -383,7 +380,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvModD:
case kRiscvModS:
- case kRiscvMsaSt:
+ case kRiscvRvvSt:
case kRiscvPush:
case kRiscvSb:
case kRiscvSd:
@@ -399,37 +396,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvUsw:
case kRiscvUStoreFloat:
case kRiscvSync:
- case kRiscvWord64AtomicStoreWord8:
- case kRiscvWord64AtomicStoreWord16:
- case kRiscvWord64AtomicStoreWord32:
case kRiscvWord64AtomicStoreWord64:
- case kRiscvWord64AtomicAddUint8:
- case kRiscvWord64AtomicAddUint16:
- case kRiscvWord64AtomicAddUint32:
case kRiscvWord64AtomicAddUint64:
- case kRiscvWord64AtomicSubUint8:
- case kRiscvWord64AtomicSubUint16:
- case kRiscvWord64AtomicSubUint32:
case kRiscvWord64AtomicSubUint64:
- case kRiscvWord64AtomicAndUint8:
- case kRiscvWord64AtomicAndUint16:
- case kRiscvWord64AtomicAndUint32:
case kRiscvWord64AtomicAndUint64:
- case kRiscvWord64AtomicOrUint8:
- case kRiscvWord64AtomicOrUint16:
- case kRiscvWord64AtomicOrUint32:
case kRiscvWord64AtomicOrUint64:
- case kRiscvWord64AtomicXorUint8:
- case kRiscvWord64AtomicXorUint16:
- case kRiscvWord64AtomicXorUint32:
case kRiscvWord64AtomicXorUint64:
- case kRiscvWord64AtomicExchangeUint8:
- case kRiscvWord64AtomicExchangeUint16:
- case kRiscvWord64AtomicExchangeUint32:
case kRiscvWord64AtomicExchangeUint64:
- case kRiscvWord64AtomicCompareExchangeUint8:
- case kRiscvWord64AtomicCompareExchangeUint16:
- case kRiscvWord64AtomicCompareExchangeUint32:
case kRiscvWord64AtomicCompareExchangeUint64:
case kRiscvStoreCompressTagged:
case kRiscvS128StoreLane:
@@ -1169,8 +1142,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return Add64Latency(false) + AndLatency(false) + AssertLatency() +
Add64Latency(false) + AndLatency(false) + BranchShortLatency() +
1 + Sub64Latency() + Add64Latency();
- case kArchWordPoisonOnSpeculation:
- return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:
@@ -1541,35 +1512,35 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return ByteSwapSignedLatency();
case kRiscvByteSwap32:
return ByteSwapSignedLatency();
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return 2;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return 3;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
return Word32AtomicExchangeLatency(true, 8);
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
return Word32AtomicExchangeLatency(false, 8);
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
return Word32AtomicExchangeLatency(true, 16);
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
return Word32AtomicExchangeLatency(false, 16);
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
return Word32AtomicCompareExchangeLatency(true, 8);
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
return Word32AtomicCompareExchangeLatency(false, 8);
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
return Word32AtomicCompareExchangeLatency(true, 16);
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
return Word32AtomicCompareExchangeLatency(false, 16);
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) +
BranchShortLatency() + 1;
case kRiscvAssertEqual:
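
The scheduler hunks above drop the per-width kRiscvWord64Atomic*Uint8/16/32 cases and switch the 32-bit latency cases from kWord32Atomic* to the shared kAtomic* opcodes: a narrow atomic is now one width-agnostic opcode plus an access width carried in the instruction word, so only the genuinely 64-bit variants keep architecture-specific codes. A minimal sketch of that encoding idea, with invented field positions rather than V8's real layout (illustration only, not part of the patch):

// Illustration only: hypothetical bit layout, not V8's actual definitions.
#include <cstdint>

enum ArchOpcode : uint32_t {
  kAtomicExchangeUint8,             // shared, width-agnostic narrow atomics
  kAtomicExchangeUint16,
  kAtomicExchangeWord32,
  kRiscvWord64AtomicExchangeUint64  // only the true 64-bit op stays per-arch
};

enum class AtomicWidth : uint32_t { kWord32 = 0, kWord64 = 1 };

// Instead of one opcode per (operation, operand size, access width) triple,
// the access width is OR-ed into the instruction word as a separate field.
constexpr uint32_t kAtomicWidthShift = 16;

constexpr uint32_t Encode(ArchOpcode op, AtomicWidth width) {
  return static_cast<uint32_t>(op) |
         (static_cast<uint32_t>(width) << kAtomicWidthShift);
}

static_assert(Encode(kAtomicExchangeUint8, AtomicWidth::kWord32) !=
                  Encode(kAtomicExchangeUint8, AtomicWidth::kWord64),
              "the width is still recoverable from the encoded instruction");
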
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
index 72706201e2..85d61aa02f 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -475,7 +475,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kRiscvLd;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaLd;
+ opcode = kRiscvRvvLd;
break;
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
@@ -489,16 +489,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
- }
EmitLoad(this, node, opcode);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -560,7 +554,7 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kRiscvSd;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaSt;
+ opcode = kRiscvRvvSt;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
@@ -569,7 +563,6 @@ void InstructionSelector::VisitStore(Node* node) {
break;
#else
UNREACHABLE();
- break;
#endif
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
@@ -1639,7 +1632,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
opcode = kRiscvUld;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaLd;
+ opcode = kRiscvRvvLd;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
@@ -1693,7 +1686,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
opcode = kRiscvUsd;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaSt;
+ opcode = kRiscvRvvSt;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
@@ -1789,7 +1782,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Int32BinopMatcher m(node, true);
NumberBinopMatcher n(node, true);
if (m.right().Is(0) || n.right().IsZero()) {
- VisitWordCompareZero(selector, g.UseRegister(left), cont);
+ VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
+ cont);
} else {
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseRegister(right), cont);
@@ -1802,7 +1796,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
case kUnsignedGreaterThanOrEqual: {
Int32BinopMatcher m(node, true);
if (m.right().Is(0)) {
- VisitWordCompareZero(selector, g.UseRegister(left), cont);
+ VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
+ cont);
} else {
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseImmediate(right), cont);
@@ -1811,7 +1806,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
default:
Int32BinopMatcher m(node, true);
if (m.right().Is(0)) {
- VisitWordCompareZero(selector, g.UseRegister(left), cont);
+ VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
+ cont);
} else {
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseRegister(right), cont);
@@ -1827,10 +1823,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
bool IsNodeUnsigned(Node* n) {
NodeMatcher m(n);
- if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
- m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad()) {
LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
+ } else if (m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(n->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ return load_rep.IsUnsigned();
} else {
return m.IsUint32Div() || m.IsUint32LessThan() ||
m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
@@ -1930,16 +1929,18 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
void EmitWordCompareZero(InstructionSelector* selector, Node* value,
FlagsContinuation* cont) {
RiscvOperandGenerator g(selector);
- selector->EmitWithContinuation(kRiscvCmpZero, g.UseRegister(value), cont);
+ selector->EmitWithContinuation(kRiscvCmpZero,
+ g.UseRegisterOrImmediateZero(value), cont);
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), g.UseRegister(base),
g.UseImmediate(index));
} else {
@@ -1947,20 +1948,22 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
}
}
void VisitAtomicStore(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
g.UseRegisterOrImmediateZero(value));
} else {
@@ -1968,14 +1971,15 @@ void VisitAtomicStore(InstructionSelector* selector, Node* node,
selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.NoOutput(), addr_reg, g.TempImmediate(0),
g.UseRegisterOrImmediateZero(value));
}
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1993,12 +1997,13 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2018,12 +2023,13 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2042,7 +2048,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
temps[1] = g.TempRegister();
temps[2] = g.TempRegister();
temps[3] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
@@ -2404,163 +2411,201 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
+ opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
}
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ MachineRepresentation rep = store_params.representation();
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
+ opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
+ opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
+ opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
}
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = kRiscvWord64AtomicLoadUint8;
+ opcode = kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = kRiscvWord64AtomicLoadUint16;
+ opcode = kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kRiscvWord64AtomicLoadUint32;
+ opcode = kAtomicLoadWord32;
break;
case MachineRepresentation::kWord64:
opcode = kRiscvWord64AtomicLoadUint64;
break;
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ opcode = kRiscv64LdDecompressTaggedSigned;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ opcode = kRiscv64LdDecompressTaggedPointer;
+ break;
+ case MachineRepresentation::kTagged:
+ opcode = kRiscv64LdDecompressAnyTagged;
+ break;
+#else
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ if (kTaggedSize == 8) {
+ opcode = kRiscvWord64AtomicLoadUint64;
+ } else {
+ opcode = kAtomicLoadWord32;
+ }
+ break;
+#endif
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ DCHECK(COMPRESS_POINTERS_BOOL);
+ opcode = kAtomicLoadWord32;
+ break;
default:
UNREACHABLE();
}
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ MachineRepresentation rep = store_params.representation();
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kRiscvWord64AtomicStoreWord8;
+ opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kRiscvWord64AtomicStoreWord16;
+ opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kRiscvWord64AtomicStoreWord32;
+ opcode = kAtomicStoreWord32;
break;
case MachineRepresentation::kWord64:
opcode = kRiscvWord64AtomicStoreWord64;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ opcode = kRiscvWord64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ CHECK(COMPRESS_POINTERS_BOOL);
+ opcode = kAtomicStoreWord32;
+ break;
default:
UNREACHABLE();
}
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kRiscvWord64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kRiscvWord64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kRiscvWord64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kRiscvWord64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kRiscvWord64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kRiscvWord64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kRiscvWord64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kRiscvWord64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
@@ -2581,15 +2626,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2614,14 +2658,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kRiscvWord64Atomic##op##Uint8, kRiscvWord64Atomic##op##Uint16, \
- kRiscvWord64Atomic##op##Uint32, kRiscvWord64Atomic##op##Uint64); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kRiscvWord64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2640,6 +2684,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
#define SIMD_TYPE_LIST(V) \
V(F32x4) \
+ V(I64x2) \
V(I32x4) \
V(I16x8) \
V(I8x16)
@@ -2844,6 +2889,7 @@ SIMD_VISIT_SPLAT(F64x2)
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
+SIMD_VISIT_EXTRACT_LANE(I64x2, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
@@ -2890,73 +2936,75 @@ struct ShuffleEntry {
ArchOpcode opcode;
};
-static const ShuffleEntry arch_shuffles[] = {
- {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
- kRiscvS32x4InterleaveRight},
- {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
- kRiscvS32x4InterleaveLeft},
- {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
- kRiscvS32x4PackEven},
- {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
- kRiscvS32x4PackOdd},
- {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
- kRiscvS32x4InterleaveEven},
- {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
- kRiscvS32x4InterleaveOdd},
-
- {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
- kRiscvS16x8InterleaveRight},
- {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
- kRiscvS16x8InterleaveLeft},
- {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
- kRiscvS16x8PackEven},
- {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
- kRiscvS16x8PackOdd},
- {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
- kRiscvS16x8InterleaveEven},
- {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
- kRiscvS16x8InterleaveOdd},
- {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
- kRiscvS16x4Reverse},
- {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
- kRiscvS16x2Reverse},
-
- {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
- kRiscvS8x16InterleaveRight},
- {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
- kRiscvS8x16InterleaveLeft},
- {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
- kRiscvS8x16PackEven},
- {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
- kRiscvS8x16PackOdd},
- {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
- kRiscvS8x16InterleaveEven},
- {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
- kRiscvS8x16InterleaveOdd},
- {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kRiscvS8x8Reverse},
- {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kRiscvS8x4Reverse},
- {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
- kRiscvS8x2Reverse}};
-
-bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
- size_t num_entries, bool is_swizzle,
- ArchOpcode* opcode) {
- uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
- for (size_t i = 0; i < num_entries; ++i) {
- const ShuffleEntry& entry = table[i];
- int j = 0;
- for (; j < kSimd128Size; ++j) {
- if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
- break;
- }
- }
- if (j == kSimd128Size) {
- *opcode = entry.opcode;
- return true;
- }
- }
- return false;
-}
+// static const ShuffleEntry arch_shuffles[] = {
+// {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+// kRiscvS32x4InterleaveRight},
+// {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+// kRiscvS32x4InterleaveLeft},
+// {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+// kRiscvS32x4PackEven},
+// {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+// kRiscvS32x4PackOdd},
+// {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+// kRiscvS32x4InterleaveEven},
+// {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+// kRiscvS32x4InterleaveOdd},
+
+// {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+// kRiscvS16x8InterleaveRight},
+// {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+// kRiscvS16x8InterleaveLeft},
+// {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+// kRiscvS16x8PackEven},
+// {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+// kRiscvS16x8PackOdd},
+// {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+// kRiscvS16x8InterleaveEven},
+// {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+// kRiscvS16x8InterleaveOdd},
+// {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+// kRiscvS16x4Reverse},
+// {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
+// kRiscvS16x2Reverse},
+
+// {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+// kRiscvS8x16InterleaveRight},
+// {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+// kRiscvS8x16InterleaveLeft},
+// {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+// kRiscvS8x16PackEven},
+// {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+// kRiscvS8x16PackOdd},
+// {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+// kRiscvS8x16InterleaveEven},
+// {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+// kRiscvS8x16InterleaveOdd},
+// {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
+// kRiscvS8x8Reverse},
+// {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
+// kRiscvS8x4Reverse},
+// {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+// kRiscvS8x2Reverse}};
+
+// bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+// size_t num_entries, bool is_swizzle,
+// ArchOpcode* opcode) {
+// uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
+// for (size_t i = 0; i < num_entries; ++i) {
+// const ShuffleEntry& entry = table[i];
+// int j = 0;
+// for (; j < kSimd128Size; ++j) {
+// if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+// break;
+// }
+// }
+// if (j == kSimd128Size) {
+// *opcode = entry.opcode;
+// return true;
+// }
+// }
+// return false;
+// }
} // namespace
@@ -2964,29 +3012,29 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
bool is_swizzle;
CanonicalizeShuffle(node, shuffle, &is_swizzle);
- uint8_t shuffle32x4[4];
- ArchOpcode opcode;
- if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
- is_swizzle, &opcode)) {
- VisitRRR(this, opcode, node);
- return;
- }
Node* input0 = node->InputAt(0);
Node* input1 = node->InputAt(1);
- uint8_t offset;
RiscvOperandGenerator g(this);
- if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
- Emit(kRiscvS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
- g.UseRegister(input0), g.UseImmediate(offset));
- return;
- }
- if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
- Emit(kRiscvS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
- g.UseRegister(input1),
- g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
- return;
- }
- Emit(kRiscvS8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ // uint8_t shuffle32x4[4];
+ // ArchOpcode opcode;
+ // if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ // is_swizzle, &opcode)) {
+ // VisitRRR(this, opcode, node);
+ // return;
+ // }
+ // uint8_t offset;
+ // if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
+ // Emit(kRiscvS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+ // g.UseRegister(input0), g.UseImmediate(offset));
+ // return;
+ // }
+ // if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ // Emit(kRiscvS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ // g.UseRegister(input1),
+ // g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
+ // return;
+ // }
+ Emit(kRiscvI8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
g.UseRegister(input1),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
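
In the selector hunks above, every Visit helper now takes an AtomicWidth and ORs both AddressingModeField::encode(...) and AtomicWidthField::encode(width) into the same InstructionCode word. A self-contained sketch of that bit-field pattern follows; the shift and width values are assumptions for the example, not V8's actual field layout, and the code is an illustration rather than part of the patch:

// Illustration only: a generic bit-field helper in the spirit of the
// AddressingModeField / AtomicWidthField encodes used above.
#include <cstdint>

template <typename T, int kShift, int kBits>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kBits) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

enum class AddressingMode : uint32_t { kMode_None = 0, kMode_MRI = 1 };
enum class AtomicWidth : uint32_t { kWord32 = 0, kWord64 = 1 };

using AddressingModeField = BitField<AddressingMode, 8, 3>;  // assumed layout
using AtomicWidthField = BitField<AtomicWidth, 11, 1>;       // assumed layout

// An instruction code then carries opcode | mode | width in one 32-bit word.
constexpr uint32_t code =
    0x2A /* opcode */ |
    AddressingModeField::encode(AddressingMode::kMode_MRI) |
    AtomicWidthField::encode(AtomicWidth::kWord64);

static_assert(AtomicWidthField::decode(code) == AtomicWidth::kWord64,
              "the code generator can recover the width later");
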
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 685293169d..3c2c3d6c06 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -985,15 +985,6 @@ void AdjustStackPointerForTailCall(
}
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
- S390OperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode());
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->AndP(value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
@@ -1071,25 +1062,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- Register scratch = r1;
-
- __ ComputeCodeStartAddress(scratch);
-
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ mov(kSpeculationPoisonRegister, Operand::Zero());
- __ mov(r0, Operand(-1));
- __ CmpS64(kJavaScriptCallCodeStartRegister, scratch);
- __ LoadOnConditionP(eq, kSpeculationPoisonRegister, r0);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ AndP(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ AndP(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ AndP(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1395,10 +1367,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()));
break;
}
- case kArchWordPoisonOnSpeculation:
- DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
- __ AndP(i.InputRegister(0), kSpeculationPoisonRegister);
- break;
case kS390_Peek: {
int reverse_slot = i.InputInt32(0);
int offset =
@@ -2155,7 +2123,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kS390_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(LoadS8);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_BitcastFloat32ToInt32:
ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadU32), nullInstr);
@@ -2173,35 +2140,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kS390_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(LoadU8);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(LoadU16);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(LoadS16);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(LoadU32);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(LoadS32);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse16:
ASSEMBLE_LOAD_INTEGER(lrvh);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse32:
ASSEMBLE_LOAD_INTEGER(lrv);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse64:
ASSEMBLE_LOAD_INTEGER(lrvg);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse16RR:
__ lrvr(i.OutputRegister(), i.InputRegister(0));
@@ -2238,7 +2197,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kS390_LoadWord64:
ASSEMBLE_LOAD_INTEGER(lg);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadAndTestWord32: {
ASSEMBLE_LOADANDTEST32(ltr, lt_z);
@@ -2258,7 +2216,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AddressingMode mode = kMode_None;
MemOperand operand = i.MemoryOperand(&mode);
__ vl(i.OutputSimd128Register(), operand, Condition(0));
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kS390_StoreWord8:
@@ -2327,40 +2284,37 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lay(i.OutputRegister(), mem);
break;
}
- case kS390_Word64AtomicExchangeUint8:
- case kWord32AtomicExchangeInt8:
- case kWord32AtomicExchangeUint8: {
+ case kAtomicExchangeInt8:
+ case kAtomicExchangeUint8: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
__ la(r1, MemOperand(base, index));
__ AtomicExchangeU8(r1, value, output, r0);
- if (opcode == kWord32AtomicExchangeInt8) {
+ if (opcode == kAtomicExchangeInt8) {
__ LoadS8(output, output);
} else {
__ LoadU8(output, output);
}
break;
}
- case kS390_Word64AtomicExchangeUint16:
- case kWord32AtomicExchangeInt16:
- case kWord32AtomicExchangeUint16: {
+ case kAtomicExchangeInt16:
+ case kAtomicExchangeUint16: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
__ la(r1, MemOperand(base, index));
__ AtomicExchangeU16(r1, value, output, r0);
- if (opcode == kWord32AtomicExchangeInt16) {
+ if (opcode == kAtomicExchangeInt16) {
__ lghr(output, output);
} else {
__ llghr(output, output);
}
break;
}
- case kS390_Word64AtomicExchangeUint32:
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
@@ -2373,34 +2327,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bne(&do_cs, Label::kNear);
break;
}
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadS8);
break;
- case kS390_Word64AtomicCompareExchangeUint8:
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadU8);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadS16);
break;
- case kS390_Word64AtomicCompareExchangeUint16:
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadU16);
break;
- case kS390_Word64AtomicCompareExchangeUint32:
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD();
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
__ srlk(result, prev, Operand(shift_right)); \
- __ LoadS8(result, result); \
+ __ LoadS8(result, result); \
}); \
break; \
- case kS390_Word64Atomic##op##Uint8: \
- case kWord32Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
__ RotateInsertSelectBits(result, prev, Operand(56), Operand(63), \
@@ -2408,15 +2358,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
true); \
}); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
__ srlk(result, prev, Operand(shift_right)); \
- __ LoadS16(result, result); \
+ __ LoadS16(result, result); \
}); \
break; \
- case kS390_Word64Atomic##op##Uint16: \
- case kWord32Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
__ RotateInsertSelectBits(result, prev, Operand(48), Operand(63), \
@@ -2430,24 +2379,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Or)
ATOMIC_BINOP_CASE(Xor, Xor)
#undef ATOMIC_BINOP_CASE
- case kS390_Word64AtomicAddUint32:
- case kWord32AtomicAddWord32:
+ case kAtomicAddWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(laa);
break;
- case kS390_Word64AtomicSubUint32:
- case kWord32AtomicSubWord32:
+ case kAtomicSubWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(LoadAndSub32);
break;
- case kS390_Word64AtomicAndUint32:
- case kWord32AtomicAndWord32:
+ case kAtomicAndWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lan);
break;
- case kS390_Word64AtomicOrUint32:
- case kWord32AtomicOrWord32:
+ case kAtomicOrWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lao);
break;
- case kS390_Word64AtomicXorUint32:
- case kWord32AtomicXorWord32:
+ case kAtomicXorWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lax);
break;
case kS390_Word64AtomicAddUint64:
@@ -2482,77 +2426,89 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
break;
// Simd Support.
-#define SIMD_BINOP_LIST(V) \
- V(F64x2Add) \
- V(F64x2Sub) \
- V(F64x2Mul) \
- V(F64x2Div) \
- V(F64x2Min) \
- V(F64x2Max) \
- V(F64x2Eq) \
- V(F64x2Ne) \
- V(F64x2Lt) \
- V(F64x2Le) \
- V(F32x4Add) \
- V(F32x4Sub) \
- V(F32x4Mul) \
- V(F32x4Div) \
- V(F32x4Min) \
- V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I64x2Add) \
- V(I64x2Sub) \
- V(I64x2Mul) \
- V(I64x2Eq) \
- V(I64x2Ne) \
- V(I64x2GtS) \
- V(I64x2GeS) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4GtU) \
- V(I32x4GeU) \
- V(I32x4MinS) \
- V(I32x4MinU) \
- V(I32x4MaxS) \
- V(I32x4MaxU) \
- V(I16x8Add) \
- V(I16x8Sub) \
- V(I16x8Mul) \
- V(I16x8Eq) \
- V(I16x8Ne) \
- V(I16x8GtS) \
- V(I16x8GeS) \
- V(I16x8GtU) \
- V(I16x8GeU) \
- V(I16x8MinS) \
- V(I16x8MinU) \
- V(I16x8MaxS) \
- V(I16x8MaxU) \
- V(I8x16Add) \
- V(I8x16Sub) \
- V(I8x16Eq) \
- V(I8x16Ne) \
- V(I8x16GtS) \
- V(I8x16GeS) \
- V(I8x16GtU) \
- V(I8x16GeU) \
- V(I8x16MinS) \
- V(I8x16MinU) \
- V(I8x16MaxS) \
- V(I8x16MaxU)
-
-#define EMIT_SIMD_BINOP(name) \
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, Simd128Register) \
+ V(F64x2Sub, Simd128Register) \
+ V(F64x2Mul, Simd128Register) \
+ V(F64x2Div, Simd128Register) \
+ V(F64x2Min, Simd128Register) \
+ V(F64x2Max, Simd128Register) \
+ V(F64x2Eq, Simd128Register) \
+ V(F64x2Ne, Simd128Register) \
+ V(F64x2Lt, Simd128Register) \
+ V(F64x2Le, Simd128Register) \
+ V(F32x4Add, Simd128Register) \
+ V(F32x4Sub, Simd128Register) \
+ V(F32x4Mul, Simd128Register) \
+ V(F32x4Div, Simd128Register) \
+ V(F32x4Min, Simd128Register) \
+ V(F32x4Max, Simd128Register) \
+ V(F32x4Eq, Simd128Register) \
+ V(F32x4Ne, Simd128Register) \
+ V(F32x4Lt, Simd128Register) \
+ V(F32x4Le, Simd128Register) \
+ V(I64x2Add, Simd128Register) \
+ V(I64x2Sub, Simd128Register) \
+ V(I64x2Mul, Simd128Register) \
+ V(I64x2Eq, Simd128Register) \
+ V(I64x2Ne, Simd128Register) \
+ V(I64x2GtS, Simd128Register) \
+ V(I64x2GeS, Simd128Register) \
+ V(I64x2Shl, Register) \
+ V(I64x2ShrS, Register) \
+ V(I64x2ShrU, Register) \
+ V(I32x4Add, Simd128Register) \
+ V(I32x4Sub, Simd128Register) \
+ V(I32x4Mul, Simd128Register) \
+ V(I32x4Eq, Simd128Register) \
+ V(I32x4Ne, Simd128Register) \
+ V(I32x4GtS, Simd128Register) \
+ V(I32x4GeS, Simd128Register) \
+ V(I32x4GtU, Simd128Register) \
+ V(I32x4GeU, Simd128Register) \
+ V(I32x4MinS, Simd128Register) \
+ V(I32x4MinU, Simd128Register) \
+ V(I32x4MaxS, Simd128Register) \
+ V(I32x4MaxU, Simd128Register) \
+ V(I32x4Shl, Register) \
+ V(I32x4ShrS, Register) \
+ V(I32x4ShrU, Register) \
+ V(I16x8Add, Simd128Register) \
+ V(I16x8Sub, Simd128Register) \
+ V(I16x8Mul, Simd128Register) \
+ V(I16x8Eq, Simd128Register) \
+ V(I16x8Ne, Simd128Register) \
+ V(I16x8GtS, Simd128Register) \
+ V(I16x8GeS, Simd128Register) \
+ V(I16x8GtU, Simd128Register) \
+ V(I16x8GeU, Simd128Register) \
+ V(I16x8MinS, Simd128Register) \
+ V(I16x8MinU, Simd128Register) \
+ V(I16x8MaxS, Simd128Register) \
+ V(I16x8MaxU, Simd128Register) \
+ V(I16x8Shl, Register) \
+ V(I16x8ShrS, Register) \
+ V(I16x8ShrU, Register) \
+ V(I8x16Add, Simd128Register) \
+ V(I8x16Sub, Simd128Register) \
+ V(I8x16Eq, Simd128Register) \
+ V(I8x16Ne, Simd128Register) \
+ V(I8x16GtS, Simd128Register) \
+ V(I8x16GeS, Simd128Register) \
+ V(I8x16GtU, Simd128Register) \
+ V(I8x16GeU, Simd128Register) \
+ V(I8x16MinS, Simd128Register) \
+ V(I8x16MinU, Simd128Register) \
+ V(I8x16MaxS, Simd128Register) \
+ V(I8x16MaxU, Simd128Register) \
+ V(I8x16Shl, Register) \
+ V(I8x16ShrS, Register) \
+ V(I8x16ShrU, Register)
+
+#define EMIT_SIMD_BINOP(name, stype) \
case kS390_##name: { \
__ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
- i.InputSimd128Register(1)); \
+ i.Input##stype(1)); \
break; \
}
SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
@@ -2657,64 +2613,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
- // vector shifts
-#define VECTOR_SHIFT(op, mode) \
- { \
- __ vlvg(kScratchDoubleReg, i.InputRegister(1), MemOperand(r0, 0), \
- Condition(mode)); \
- __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), \
- Condition(mode)); \
- __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
- kScratchDoubleReg, Condition(0), Condition(0), Condition(mode)); \
- }
- case kS390_I64x2Shl: {
- VECTOR_SHIFT(veslv, 3);
- break;
- }
- case kS390_I64x2ShrS: {
- VECTOR_SHIFT(vesrav, 3);
- break;
- }
- case kS390_I64x2ShrU: {
- VECTOR_SHIFT(vesrlv, 3);
- break;
- }
- case kS390_I32x4Shl: {
- VECTOR_SHIFT(veslv, 2);
- break;
- }
- case kS390_I32x4ShrS: {
- VECTOR_SHIFT(vesrav, 2);
- break;
- }
- case kS390_I32x4ShrU: {
- VECTOR_SHIFT(vesrlv, 2);
- break;
- }
- case kS390_I16x8Shl: {
- VECTOR_SHIFT(veslv, 1);
- break;
- }
- case kS390_I16x8ShrS: {
- VECTOR_SHIFT(vesrav, 1);
- break;
- }
- case kS390_I16x8ShrU: {
- VECTOR_SHIFT(vesrlv, 1);
- break;
- }
- case kS390_I8x16Shl: {
- VECTOR_SHIFT(veslv, 0);
- break;
- }
- case kS390_I8x16ShrS: {
- VECTOR_SHIFT(vesrav, 0);
- break;
- }
- case kS390_I8x16ShrU: {
- VECTOR_SHIFT(vesrlv, 0);
- break;
- }
// vector unary ops
case kS390_F64x2Abs: {
__ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3489,6 +3387,120 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpkls(dst, dst, kScratchDoubleReg, Condition(0), Condition(3));
break;
}
+#define LOAD_SPLAT(type) \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ LoadAndSplat##type##LE(dst, operand);
+ case kS390_S128Load64Splat: {
+ LOAD_SPLAT(64x2);
+ break;
+ }
+ case kS390_S128Load32Splat: {
+ LOAD_SPLAT(32x4);
+ break;
+ }
+ case kS390_S128Load16Splat: {
+ LOAD_SPLAT(16x8);
+ break;
+ }
+ case kS390_S128Load8Splat: {
+ LOAD_SPLAT(8x16);
+ break;
+ }
+#undef LOAD_SPLAT
+#define LOAD_EXTEND(type) \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ LoadAndExtend##type##LE(dst, operand);
+ case kS390_S128Load32x2U: {
+ LOAD_EXTEND(32x2U);
+ break;
+ }
+ case kS390_S128Load32x2S: {
+ LOAD_EXTEND(32x2S);
+ break;
+ }
+ case kS390_S128Load16x4U: {
+ LOAD_EXTEND(16x4U);
+ break;
+ }
+ case kS390_S128Load16x4S: {
+ LOAD_EXTEND(16x4S);
+ break;
+ }
+ case kS390_S128Load8x8U: {
+ LOAD_EXTEND(8x8U);
+ break;
+ }
+ case kS390_S128Load8x8S: {
+ LOAD_EXTEND(8x8S);
+ break;
+ }
+#undef LOAD_EXTEND
+#define LOAD_AND_ZERO(type) \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ LoadV##type##ZeroLE(dst, operand);
+ case kS390_S128Load32Zero: {
+ LOAD_AND_ZERO(32);
+ break;
+ }
+ case kS390_S128Load64Zero: {
+ LOAD_AND_ZERO(64);
+ break;
+ }
+#undef LOAD_AND_ZERO
+#undef LOAD_EXTEND
+#define LOAD_LANE(type, lane) \
+ AddressingMode mode = kMode_None; \
+ size_t index = 2; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ __ LoadLane##type##LE(dst, operand, lane);
+ case kS390_S128Load8Lane: {
+ LOAD_LANE(8, 15 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Load16Lane: {
+ LOAD_LANE(16, 7 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Load32Lane: {
+ LOAD_LANE(32, 3 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Load64Lane: {
+ LOAD_LANE(64, 1 - i.InputUint8(1));
+ break;
+ }
+#undef LOAD_LANE
+#define STORE_LANE(type, lane) \
+ AddressingMode mode = kMode_None; \
+ size_t index = 2; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Simd128Register src = i.InputSimd128Register(0); \
+ __ StoreLane##type##LE(src, operand, lane);
+ case kS390_S128Store8Lane: {
+ STORE_LANE(8, 15 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Store16Lane: {
+ STORE_LANE(16, 7 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Store32Lane: {
+ STORE_LANE(32, 3 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Store64Lane: {
+ STORE_LANE(64, 1 - i.InputUint8(1));
+ break;
+ }
+#undef STORE_LANE
case kS390_StoreCompressTagged: {
CHECK(!instr->HasOutput());
size_t index = 0;
@@ -3541,20 +3553,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
- condition == kOverflow || condition == kNotOverflow) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ mov(r0, Operand::Zero());
- __ LoadOnConditionP(FlagsConditionToCondition(condition, kArchNop),
- kSpeculationPoisonRegister, r0);
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3781,7 +3779,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
@@ -4028,7 +4025,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on S390.
- break;
}
if (destination->IsStackSlot()) {
__ StoreU64(dst, g.ToMemOperand(destination), r0);
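
The SIMD_BINOP_LIST rewrite above adds an operand-kind column to the X-macro so that the shift cases, whose right-hand operand is a scalar shift amount in a Register, can share the EMIT_SIMD_BINOP expansion with the ordinary Simd128Register binops; that is also why the hand-written VECTOR_SHIFT cases further down could be deleted. A stripped-down sketch of the two-column X-macro pattern, using stand-in types and a fake operand accessor (illustration only, not V8 code):

// Illustration only: stand-in types and names, not V8's operand converter.
#include <iostream>
#include <string>

struct Simd128Register { std::string name; };
struct Register { std::string name; };

Simd128Register InputSimd128Register(int i) { return {"v" + std::to_string(i)}; }
Register InputRegister(int i) { return {"r" + std::to_string(i)}; }

// The second column selects which operand accessor the expansion calls.
#define SIMD_BINOP_LIST(V)      \
  V(I32x4Add, Simd128Register)  \
  V(I32x4Shl, Register)

#define EMIT_SIMD_BINOP(name, stype)                                 \
  void Emit_##name() {                                               \
    /* the real code generator calls i.Input##stype(1) here */       \
    std::cout << #name << " rhs operand: " << Input##stype(1).name   \
              << "\n";                                               \
  }
SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
#undef EMIT_SIMD_BINOP
#undef SIMD_BINOP_LIST

int main() {
  Emit_I32x4Add();  // prints a Simd128Register-style operand (v1)
  Emit_I32x4Shl();  // prints a Register-style operand (r1)
}
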
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
index 4eea2fa865..03806b57b1 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -161,36 +161,12 @@ namespace compiler {
V(S390_StoreReverseSimd128) \
V(S390_StoreFloat32) \
V(S390_StoreDouble) \
- V(S390_CompressSigned) \
- V(S390_CompressPointer) \
- V(S390_CompressAny) \
- V(S390_Word64AtomicExchangeUint8) \
- V(S390_Word64AtomicExchangeUint16) \
- V(S390_Word64AtomicExchangeUint32) \
V(S390_Word64AtomicExchangeUint64) \
- V(S390_Word64AtomicCompareExchangeUint8) \
- V(S390_Word64AtomicCompareExchangeUint16) \
- V(S390_Word64AtomicCompareExchangeUint32) \
V(S390_Word64AtomicCompareExchangeUint64) \
- V(S390_Word64AtomicAddUint8) \
- V(S390_Word64AtomicAddUint16) \
- V(S390_Word64AtomicAddUint32) \
V(S390_Word64AtomicAddUint64) \
- V(S390_Word64AtomicSubUint8) \
- V(S390_Word64AtomicSubUint16) \
- V(S390_Word64AtomicSubUint32) \
V(S390_Word64AtomicSubUint64) \
- V(S390_Word64AtomicAndUint8) \
- V(S390_Word64AtomicAndUint16) \
- V(S390_Word64AtomicAndUint32) \
V(S390_Word64AtomicAndUint64) \
- V(S390_Word64AtomicOrUint8) \
- V(S390_Word64AtomicOrUint16) \
- V(S390_Word64AtomicOrUint32) \
V(S390_Word64AtomicOrUint64) \
- V(S390_Word64AtomicXorUint8) \
- V(S390_Word64AtomicXorUint16) \
- V(S390_Word64AtomicXorUint32) \
V(S390_Word64AtomicXorUint64) \
V(S390_F64x2Splat) \
V(S390_F64x2ReplaceLane) \
@@ -396,6 +372,26 @@ namespace compiler {
V(S390_S128Not) \
V(S390_S128Select) \
V(S390_S128AndNot) \
+ V(S390_S128Load8Splat) \
+ V(S390_S128Load16Splat) \
+ V(S390_S128Load32Splat) \
+ V(S390_S128Load64Splat) \
+ V(S390_S128Load8x8S) \
+ V(S390_S128Load8x8U) \
+ V(S390_S128Load16x4S) \
+ V(S390_S128Load16x4U) \
+ V(S390_S128Load32x2S) \
+ V(S390_S128Load32x2U) \
+ V(S390_S128Load32Zero) \
+ V(S390_S128Load64Zero) \
+ V(S390_S128Load8Lane) \
+ V(S390_S128Load16Lane) \
+ V(S390_S128Load32Lane) \
+ V(S390_S128Load64Lane) \
+ V(S390_S128Store8Lane) \
+ V(S390_S128Store16Lane) \
+ V(S390_S128Store32Lane) \
+ V(S390_S128Store64Lane) \
V(S390_StoreSimd128) \
V(S390_LoadSimd128) \
V(S390_StoreCompressTagged) \
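
The instruction-codes header above is a single X-macro list: deleting the per-width atomic entries and adding the S390_S128Load*/Store*Lane entries is all that is needed, because the common backend expands the same list into the ArchOpcode enum and its printable name table. A toy version of that double expansion, trimmed to three opcodes (illustration only, not the real list):

// Illustration only: a three-entry stand-in for the target opcode list.
#include <cstdio>

#define TARGET_ARCH_OPCODE_LIST(V) \
  V(S390_S128Load8Splat)           \
  V(S390_S128Load8Lane)            \
  V(S390_S128Store8Lane)

// One expansion produces the enum...
enum ArchOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  TARGET_ARCH_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
  kArchOpcodeCount
};

// ...and another produces a printable name table from the same list.
const char* const kOpcodeNames[] = {
#define OPCODE_NAME(Name) #Name,
    TARGET_ARCH_OPCODE_LIST(OPCODE_NAME)
#undef OPCODE_NAME
};

int main() {
  for (int i = 0; i < kArchOpcodeCount; ++i) {
    std::printf("%s\n", kOpcodeNames[i]);
  }
}
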
diff --git a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index afc28b1f8c..d7046507c7 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -135,9 +135,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadAndTestWord64:
case kS390_LoadAndTestFloat32:
case kS390_LoadAndTestFloat64:
- case kS390_CompressSigned:
- case kS390_CompressPointer:
- case kS390_CompressAny:
case kS390_F64x2Splat:
case kS390_F64x2ReplaceLane:
case kS390_F64x2Abs:
@@ -362,6 +359,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadDecompressTaggedSigned:
case kS390_LoadDecompressTaggedPointer:
case kS390_LoadDecompressAnyTagged:
+ case kS390_S128Load8Splat:
+ case kS390_S128Load16Splat:
+ case kS390_S128Load32Splat:
+ case kS390_S128Load64Splat:
+ case kS390_S128Load8x8S:
+ case kS390_S128Load8x8U:
+ case kS390_S128Load16x4S:
+ case kS390_S128Load16x4U:
+ case kS390_S128Load32x2S:
+ case kS390_S128Load32x2U:
+ case kS390_S128Load32Zero:
+ case kS390_S128Load64Zero:
+ case kS390_S128Load8Lane:
+ case kS390_S128Load16Lane:
+ case kS390_S128Load32Lane:
+ case kS390_S128Load64Lane:
return kIsLoadOperation;
case kS390_StoreWord8:
@@ -379,35 +392,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_Push:
case kS390_PushFrame:
case kS390_StoreToStackSlot:
+ case kS390_S128Store8Lane:
+ case kS390_S128Store16Lane:
+ case kS390_S128Store32Lane:
+ case kS390_S128Store64Lane:
return kHasSideEffect;
- case kS390_Word64AtomicExchangeUint8:
- case kS390_Word64AtomicExchangeUint16:
- case kS390_Word64AtomicExchangeUint32:
case kS390_Word64AtomicExchangeUint64:
- case kS390_Word64AtomicCompareExchangeUint8:
- case kS390_Word64AtomicCompareExchangeUint16:
- case kS390_Word64AtomicCompareExchangeUint32:
case kS390_Word64AtomicCompareExchangeUint64:
- case kS390_Word64AtomicAddUint8:
- case kS390_Word64AtomicAddUint16:
- case kS390_Word64AtomicAddUint32:
case kS390_Word64AtomicAddUint64:
- case kS390_Word64AtomicSubUint8:
- case kS390_Word64AtomicSubUint16:
- case kS390_Word64AtomicSubUint32:
case kS390_Word64AtomicSubUint64:
- case kS390_Word64AtomicAndUint8:
- case kS390_Word64AtomicAndUint16:
- case kS390_Word64AtomicAndUint32:
case kS390_Word64AtomicAndUint64:
- case kS390_Word64AtomicOrUint8:
- case kS390_Word64AtomicOrUint16:
- case kS390_Word64AtomicOrUint32:
case kS390_Word64AtomicOrUint64:
- case kS390_Word64AtomicXorUint8:
- case kS390_Word64AtomicXorUint16:
- case kS390_Word64AtomicXorUint32:
case kS390_Word64AtomicXorUint64:
return kHasSideEffect;
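
The scheduler hunks above classify the new SIMD lane loads as kIsLoadOperation and the lane stores, together with the remaining 64-bit atomics, as kHasSideEffect; these flags are what keep the instruction scheduler from reordering such operations across conflicting memory accesses. A reduced sketch of how flags like these can gate reordering, with a simplified policy and an invented CanReorder helper that is not V8's actual scheduler logic:

// Illustration only: simplified flags, opcodes, and reordering policy.
#include <cstdint>
#include <cstdio>

enum Flag : uint32_t { kNoFlags = 0, kIsLoadOperation = 1, kHasSideEffect = 2 };
enum Opcode { kS128Load8Lane, kS128Store8Lane, kAdd };

uint32_t GetTargetInstructionFlags(Opcode op) {
  switch (op) {
    case kS128Load8Lane:
      return kIsLoadOperation;  // must not move past a conflicting store
    case kS128Store8Lane:
      return kHasSideEffect;    // pins ordering against loads and stores
    default:
      return kNoFlags;          // pure computation, freely schedulable
  }
}

// Two instructions may swap only if neither ordering constraint is violated.
bool CanReorder(Opcode a, Opcode b) {
  uint32_t fa = GetTargetInstructionFlags(a);
  uint32_t fb = GetTargetInstructionFlags(b);
  bool a_mem = fa & (kIsLoadOperation | kHasSideEffect);
  bool b_mem = fb & (kIsLoadOperation | kHasSideEffect);
  // Conservative rule: two memory-touching ops may swap only if both are loads.
  if (a_mem && b_mem) {
    return (fa & kIsLoadOperation) && (fb & kIsLoadOperation);
  }
  return true;
}

int main() {
  std::printf("load/store swap ok? %d\n",
              CanReorder(kS128Load8Lane, kS128Store8Lane));  // 0
  std::printf("load/load  swap ok? %d\n",
              CanReorder(kS128Load8Lane, kS128Load8Lane));   // 1
}
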
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index bcf5a8dfff..489065e65f 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -106,7 +106,6 @@ class S390OperandGenerator final : public OperandGenerator {
return OpParameter<int64_t>(node->op());
else
UNIMPLEMENTED();
- return 0L;
}
bool CanBeImmediate(Node* node, OperandModes mode) {
@@ -272,8 +271,7 @@ bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
(S390OpcodeOnlySupport12BitDisp(op) ? OperandMode::kUint12Imm \
: OperandMode::kInt20Imm)
-ArchOpcode SelectLoadOpcode(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ArchOpcode SelectLoadOpcode(LoadRepresentation load_rep) {
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
@@ -466,7 +464,8 @@ void GenerateRightOperands(InstructionSelector* selector, Node* node,
} else if (*operand_mode & OperandMode::kAllowMemoryOperand) {
NodeMatcher mright(right);
if (mright.IsLoad() && selector->CanCover(node, right) &&
- canCombineWithLoad(SelectLoadOpcode(right))) {
+ canCombineWithLoad(
+ SelectLoadOpcode(LoadRepresentationOf(right->op())))) {
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
right, inputs, input_count, OpcodeImmMode(*opcode));
*opcode |= AddressingModeField::encode(mode);
@@ -695,23 +694,23 @@ void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
}
-void InstructionSelector::VisitLoad(Node* node) {
+void InstructionSelector::VisitLoad(Node* node, Node* value,
+ InstructionCode opcode) {
S390OperandGenerator g(this);
- InstructionCode opcode = SelectLoadOpcode(node);
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
opcode |= AddressingModeField::encode(mode);
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
Emit(opcode, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ InstructionCode opcode = SelectLoadOpcode(load_rep);
+ VisitLoad(node, node, opcode);
+}
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
@@ -2153,21 +2152,18 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
- load_rep.representation() == MachineRepresentation::kWord16 ||
- load_rep.representation() == MachineRepresentation::kWord32);
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoad(node, node, SelectLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- VisitGeneralStore(this, node, rep);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitGeneralStore(this, node, store_params.representation());
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2181,7 +2177,8 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs);
}
@@ -2189,40 +2186,40 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kS390_Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kS390_Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kS390_Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kS390_Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2248,7 +2245,8 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
size_t output_count = 0;
outputs[output_count++] = g.DefineSameAsFirst(node);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, output_count, outputs, input_count, inputs);
}
@@ -2256,40 +2254,40 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Uint8()) {
- opcode = kS390_Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kS390_Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kS390_Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kS390_Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2318,7 +2316,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
size_t temp_count = 0;
temps[temp_count++] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, output_count, outputs, input_count, inputs, temp_count,
temps);
}
@@ -2342,15 +2341,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2376,14 +2374,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC64_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kS390_Word64Atomic##op##Uint8, kS390_Word64Atomic##op##Uint16, \
- kS390_Word64Atomic##op##Uint32, kS390_Word64Atomic##op##Uint64); \
+#define VISIT_ATOMIC64_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kS390_Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC64_BINOP(Add)
VISIT_ATOMIC64_BINOP(Sub)
@@ -2393,14 +2391,14 @@ VISIT_ATOMIC64_BINOP(Xor)
#undef VISIT_ATOMIC64_BINOP
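
The two macro blocks above use token pasting (##) to stamp out the Add/Sub/And/Or/Xor visitors from a single template and then #undef the macro to keep the name local. A minimal standalone sketch of that pattern:

#include <cstdio>

// One macro defines a family of nearly identical functions via ##.
#define DEFINE_BINOP(op, expr) \
  int Do##op(int a, int b) { return (expr); }

DEFINE_BINOP(Add, a + b)
DEFINE_BINOP(Sub, a - b)
DEFINE_BINOP(Xor, a ^ b)
#undef DEFINE_BINOP

int main() {
  std::printf("%d %d %d\n", DoAdd(6, 3), DoSub(6, 3), DoXor(6, 3));
  return 0;
}
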
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoad(node, node, SelectLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- VisitGeneralStore(this, node, rep);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitGeneralStore(this, node, store_params.representation());
}
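
The switch from LoadRepresentationOf/AtomicStoreRepresentationOf to AtomicLoadParametersOf/AtomicStoreParametersOf reflects that the atomic operators now carry more than a representation. A simplified, illustrative stand-in is sketched below; only representation() is confirmed by this diff, and the order() accessor is an assumption based on the memory-order handling elsewhere in the patch, not a statement of V8's actual API.

#include <atomic>

enum class MachineRepresentation { kWord8, kWord16, kWord32, kWord64 };

class AtomicLoadParametersSketch {
 public:
  AtomicLoadParametersSketch(MachineRepresentation rep, std::memory_order order)
      : rep_(rep), order_(order) {}
  MachineRepresentation representation() const { return rep_; }
  std::memory_order order() const { return order_; }  // assumed field

 private:
  MachineRepresentation rep_;
  std::memory_order order_;
};

int main() {
  AtomicLoadParametersSketch params(MachineRepresentation::kWord32,
                                    std::memory_order_seq_cst);
  // An instruction selector would pick the load opcode from representation()
  // and decide on any extra fencing from order().
  return params.representation() == MachineRepresentation::kWord32 ? 0 : 1;
}
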
#define SIMD_TYPES(V) \
@@ -2789,18 +2787,107 @@ void InstructionSelector::EmitPrepareResults(
}
void InstructionSelector::VisitLoadLane(Node* node) {
- // We should never reach here, see http://crrev.com/c/2577820
- UNREACHABLE();
+ LoadLaneParameters params = LoadLaneParametersOf(node->op());
+ InstructionCode opcode;
+ if (params.rep == MachineType::Int8()) {
+ opcode = kS390_S128Load8Lane;
+ } else if (params.rep == MachineType::Int16()) {
+ opcode = kS390_S128Load16Lane;
+ } else if (params.rep == MachineType::Int32()) {
+ opcode = kS390_S128Load32Lane;
+ } else if (params.rep == MachineType::Int64()) {
+ opcode = kS390_S128Load64Lane;
+ } else {
+ UNREACHABLE();
+ }
+
+ S390OperandGenerator g(this);
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
+ InstructionOperand inputs[5];
+ size_t input_count = 0;
+
+ inputs[input_count++] = g.UseRegister(node->InputAt(2));
+ inputs[input_count++] = g.UseImmediate(params.laneidx);
+
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+ Emit(opcode, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitLoadTransform(Node* node) {
- // We should never reach here, see http://crrev.com/c/2050811
- UNREACHABLE();
+ LoadTransformParameters params = LoadTransformParametersOf(node->op());
+ ArchOpcode opcode;
+ switch (params.transformation) {
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kS390_S128Load8Splat;
+ break;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kS390_S128Load16Splat;
+ break;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kS390_S128Load32Splat;
+ break;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kS390_S128Load64Splat;
+ break;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kS390_S128Load8x8S;
+ break;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kS390_S128Load8x8U;
+ break;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kS390_S128Load16x4S;
+ break;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kS390_S128Load16x4U;
+ break;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kS390_S128Load32x2S;
+ break;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kS390_S128Load32x2U;
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kS390_S128Load32Zero;
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kS390_S128Load64Zero;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ VisitLoad(node, node, opcode);
}
void InstructionSelector::VisitStoreLane(Node* node) {
- // We should never reach here, see http://crrev.com/c/2577820
- UNREACHABLE();
+ StoreLaneParameters params = StoreLaneParametersOf(node->op());
+ InstructionCode opcode;
+ if (params.rep == MachineRepresentation::kWord8) {
+ opcode = kS390_S128Store8Lane;
+ } else if (params.rep == MachineRepresentation::kWord16) {
+ opcode = kS390_S128Store16Lane;
+ } else if (params.rep == MachineRepresentation::kWord32) {
+ opcode = kS390_S128Store32Lane;
+ } else if (params.rep == MachineRepresentation::kWord64) {
+ opcode = kS390_S128Store64Lane;
+ } else {
+ UNREACHABLE();
+ }
+
+ S390OperandGenerator g(this);
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
+ InstructionOperand inputs[5];
+ size_t input_count = 0;
+
+ inputs[input_count++] = g.UseRegister(node->InputAt(2));
+ inputs[input_count++] = g.UseImmediate(params.laneidx);
+
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+ Emit(opcode, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index 60a40fb489..3e2298de3e 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -324,11 +324,124 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Zone* zone_;
};
+template <std::memory_order order>
+void EmitStore(TurboAssembler* tasm, Operand operand, Register value,
+ MachineRepresentation rep) {
+ if (order == std::memory_order_relaxed) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ tasm->movb(operand, value);
+ break;
+ case MachineRepresentation::kWord16:
+ tasm->movw(operand, value);
+ break;
+ case MachineRepresentation::kWord32:
+ tasm->movl(operand, value);
+ break;
+ case MachineRepresentation::kWord64:
+ tasm->movq(operand, value);
+ break;
+ case MachineRepresentation::kTagged:
+ tasm->StoreTaggedField(operand, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ }
+
+ DCHECK_EQ(order, std::memory_order_seq_cst);
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgb(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kWord16:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgw(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kWord32:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgl(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kWord64:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgq(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kTagged:
+ tasm->AtomicStoreTaggedField(operand, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+template <std::memory_order order>
+void EmitStore(TurboAssembler* tasm, Operand operand, Immediate value,
+ MachineRepresentation rep);
+
+template <>
+void EmitStore<std::memory_order_relaxed>(TurboAssembler* tasm, Operand operand,
+ Immediate value,
+ MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ tasm->movb(operand, value);
+ break;
+ case MachineRepresentation::kWord16:
+ tasm->movw(operand, value);
+ break;
+ case MachineRepresentation::kWord32:
+ tasm->movl(operand, value);
+ break;
+ case MachineRepresentation::kWord64:
+ tasm->movq(operand, value);
+ break;
+ case MachineRepresentation::kTagged:
+ tasm->StoreTaggedField(operand, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
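
The split in EmitStore mirrors how C++ compilers lower std::atomic stores on x86-64: a relaxed store is an ordinary mov, while a sequentially consistent store is implemented with xchg, whose implicit lock prefix provides the required full barrier. A standalone analogue:

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> g_cell{0};

// Relaxed store: x86-64 compilers emit a plain mov, matching the
// memory_order_relaxed branch of EmitStore above.
void RelaxedStore(uint64_t v) {
  g_cell.store(v, std::memory_order_relaxed);
}

// Sequentially consistent store: compilers emit xchg (or mov + mfence);
// the implicit lock of xchg acts as a full barrier, matching the
// memory_order_seq_cst branch above.
void SeqCstStore(uint64_t v) {
  g_cell.store(v, std::memory_order_seq_cst);
}

int main() {
  RelaxedStore(1);
  SeqCstStore(2);
  return static_cast<int>(g_cell.load(std::memory_order_relaxed)) - 2;
}
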
+
#ifdef V8_IS_TSAN
-class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
+void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm,
+ Register scratch, Operand operand,
+ StubCallMode mode, int size) {
+#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
+ // The wasm OOB trap handler needs to be able to look up the faulting
+ // instruction pointer to handle the SIGSEGV raised by an OOB access. It
+ // will not handle SIGSEGVs raised by the TSAN store helpers. Emit a
+ // redundant load here to give the trap handler a chance to handle any
+ // OOB SIGSEGVs.
+ if (trap_handler::IsTrapHandlerEnabled() &&
+ mode == StubCallMode::kCallWasmRuntimeStub) {
+ switch (size) {
+ case kInt8Size:
+ tasm->movb(scratch, operand);
+ break;
+ case kInt16Size:
+ tasm->movw(scratch, operand);
+ break;
+ case kInt32Size:
+ tasm->movl(scratch, operand);
+ break;
+ case kInt64Size:
+ tasm->movq(scratch, operand);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+#endif
+}
+
+class OutOfLineTSANStore : public OutOfLineCode {
public:
- OutOfLineTSANRelaxedStore(CodeGenerator* gen, Operand operand, Register value,
- Register scratch0, StubCallMode stub_mode, int size)
+ OutOfLineTSANStore(CodeGenerator* gen, Operand operand, Register value,
+ Register scratch0, StubCallMode stub_mode, int size,
+ std::memory_order order)
: OutOfLineCode(gen),
operand_(operand),
value_(value),
@@ -337,6 +450,7 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
stub_mode_(stub_mode),
#endif // V8_ENABLE_WEBASSEMBLY
size_(size),
+ memory_order_(order),
zone_(gen->zone()) {
DCHECK(!AreAliased(value, scratch0));
}
@@ -352,14 +466,15 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
- __ CallTSANRelaxedStoreStub(scratch0_, value_, save_fp_mode, size_,
- StubCallMode::kCallWasmRuntimeStub);
+ tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
+ StubCallMode::kCallWasmRuntimeStub,
+ memory_order_);
return;
}
#endif // V8_ENABLE_WEBASSEMBLY
- __ CallTSANRelaxedStoreStub(scratch0_, value_, save_fp_mode, size_,
- StubCallMode::kCallBuiltinPointer);
+ tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
+ StubCallMode::kCallBuiltinPointer, memory_order_);
}
private:
@@ -370,42 +485,66 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
StubCallMode const stub_mode_;
#endif // V8_ENABLE_WEBASSEMBLY
int size_;
+ const std::memory_order memory_order_;
Zone* zone_;
};
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Register value_reg, X64OperandConverter& i,
- StubCallMode mode, int size) {
+void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, TurboAssembler* tasm,
+ Operand operand, Register value_reg,
+ X64OperandConverter& i, StubCallMode mode, int size,
+ std::memory_order order) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
// TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
- // path. It is not crucial, but it would be nice to remove this if.
- if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
+ // path. It is not crucial, but it would be nice to remove this restriction.
+ DCHECK_NE(codegen->code_kind(), CodeKind::FOR_TESTING);
Register scratch0 = i.TempRegister(0);
- auto tsan_ool = zone->New<OutOfLineTSANRelaxedStore>(
- codegen, operand, value_reg, scratch0, mode, size);
+ auto tsan_ool = zone->New<OutOfLineTSANStore>(codegen, operand, value_reg,
+ scratch0, mode, size, order);
tasm->jmp(tsan_ool->entry());
tasm->bind(tsan_ool->exit());
}
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Immediate value, X64OperandConverter& i,
- StubCallMode mode, int size) {
+template <std::memory_order order>
+Register GetTSANValueRegister(TurboAssembler* tasm, Register value,
+ X64OperandConverter& i) {
+ return value;
+}
+
+template <std::memory_order order>
+Register GetTSANValueRegister(TurboAssembler* tasm, Immediate value,
+ X64OperandConverter& i);
+
+template <>
+Register GetTSANValueRegister<std::memory_order_relaxed>(
+ TurboAssembler* tasm, Immediate value, X64OperandConverter& i) {
+ Register value_reg = i.TempRegister(1);
+ tasm->movq(value_reg, value);
+ return value_reg;
+}
+
+template <std::memory_order order, typename ValueT>
+void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand, ValueT value,
+ X64OperandConverter& i, StubCallMode stub_call_mode,
+ MachineRepresentation rep) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
// TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
- // path. It is not crucial, but it would be nice to remove this if.
- if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
-
- Register value_reg = i.TempRegister(1);
- tasm->movq(value_reg, value);
- EmitTSANStoreOOLIfNeeded(zone, codegen, tasm, operand, value_reg, i, mode,
- size);
+ // path. It is not crucial, but it would be nice to remove this restriction.
+ if (codegen->code_kind() != CodeKind::FOR_TESTING) {
+ int size = ElementSizeInBytes(rep);
+ EmitMemoryProbeForTrapHandlerIfNeeded(tasm, i.TempRegister(0), operand,
+ stub_call_mode, size);
+ Register value_reg = GetTSANValueRegister<order>(tasm, value, i);
+ EmitTSANStoreOOL(zone, codegen, tasm, operand, value_reg, i, stub_call_mode,
+ size, order);
+ } else {
+ EmitStore<order>(tasm, operand, value, rep);
+ }
}
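
EmitStore and EmitTSANAwareStore take the memory order as a non-type template parameter, so the relaxed and seq_cst variants are selected at compile time rather than branched on at runtime. A standalone sketch of that technique, with print statements standing in for code emission:

#include <atomic>
#include <cstdio>

template <std::memory_order order>
void StoreSketch(int value) {
  static_assert(order == std::memory_order_relaxed ||
                    order == std::memory_order_seq_cst,
                "only relaxed and seq_cst stores are modelled");
  if (order == std::memory_order_relaxed) {
    std::printf("mov-style store of %d\n", value);
  } else {
    std::printf("xchg-style store of %d\n", value);
  }
}

int main() {
  StoreSketch<std::memory_order_relaxed>(1);
  StoreSketch<std::memory_order_seq_cst>(2);
  return 0;
}
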
class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
@@ -453,10 +592,10 @@ class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
Zone* zone_;
};
-void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- X64OperandConverter& i, StubCallMode mode,
- int size) {
+void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand,
+ X64OperandConverter& i, StubCallMode mode,
+ int size) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
@@ -472,20 +611,20 @@ void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
}
#else
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Register value_reg, X64OperandConverter& i,
- StubCallMode mode, int size) {}
-
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Immediate value, X64OperandConverter& i,
- StubCallMode mode, int size) {}
-
-void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- X64OperandConverter& i, StubCallMode mode,
- int size) {}
+template <std::memory_order order, typename ValueT>
+void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand, ValueT value,
+ X64OperandConverter& i, StubCallMode stub_call_mode,
+ MachineRepresentation rep) {
+ DCHECK(order == std::memory_order_relaxed ||
+ order == std::memory_order_seq_cst);
+ EmitStore<order>(tasm, operand, value, rep);
+}
+
+void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand,
+ X64OperandConverter& i, StubCallMode mode,
+ int size) {}
#endif // V8_IS_TSAN
#if V8_ENABLE_WEBASSEMBLY
@@ -569,16 +708,6 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
#endif // V8_ENABLE_WEBASSEMBLY
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- X64OperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->andq(value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_UNOP(asm_instr) \
@@ -871,24 +1000,32 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} \
} while (false)
-#define ASSEMBLE_PINSR(ASM_INSTR) \
- do { \
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
- XMMRegister dst = i.OutputSimd128Register(); \
- XMMRegister src = i.InputSimd128Register(0); \
- uint8_t laneidx = i.InputUint8(1); \
- if (HasAddressingMode(instr)) { \
- __ ASM_INSTR(dst, src, i.MemoryOperand(2), laneidx); \
- break; \
- } \
- if (instr->InputAt(2)->IsFPRegister()) { \
- __ Movq(kScratchRegister, i.InputDoubleRegister(2)); \
- __ ASM_INSTR(dst, src, kScratchRegister, laneidx); \
- } else if (instr->InputAt(2)->IsRegister()) { \
- __ ASM_INSTR(dst, src, i.InputRegister(2), laneidx); \
- } else { \
- __ ASM_INSTR(dst, src, i.InputOperand(2), laneidx); \
- } \
+#define ASSEMBLE_PINSR(ASM_INSTR) \
+ do { \
+ XMMRegister dst = i.OutputSimd128Register(); \
+ XMMRegister src = i.InputSimd128Register(0); \
+ uint8_t laneidx = i.InputUint8(1); \
+ uint32_t load_offset; \
+ if (HasAddressingMode(instr)) { \
+ __ ASM_INSTR(dst, src, i.MemoryOperand(2), laneidx, &load_offset); \
+ } else if (instr->InputAt(2)->IsFPRegister()) { \
+ __ Movq(kScratchRegister, i.InputDoubleRegister(2)); \
+ __ ASM_INSTR(dst, src, kScratchRegister, laneidx, &load_offset); \
+ } else if (instr->InputAt(2)->IsRegister()) { \
+ __ ASM_INSTR(dst, src, i.InputRegister(2), laneidx, &load_offset); \
+ } else { \
+ __ ASM_INSTR(dst, src, i.InputOperand(2), laneidx, &load_offset); \
+ } \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, load_offset); \
+ } while (false)
+
+#define ASSEMBLE_SEQ_CST_STORE(rep) \
+ do { \
+ Register value = i.InputRegister(0); \
+ Operand operand = i.MemoryOperand(1); \
+ EmitTSANAwareStore<std::memory_order_seq_cst>( \
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), \
+ rep); \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
@@ -1019,22 +1156,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, not_zero);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Set a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ ComputeCodeStartAddress(rbx);
- __ xorq(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
- __ cmpq(kJavaScriptCallCodeStartRegister, rbx);
- __ Move(rbx, -1);
- __ cmovq(equal, kSpeculationPoisonRegister, rbx);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ andq(kJSFunctionRegister, kSpeculationPoisonRegister);
- __ andq(kContextRegister, kSpeculationPoisonRegister);
- __ andq(rsp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1052,11 +1173,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -1078,19 +1195,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
__ near_call(wasm_code, constant.rmode());
} else {
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(wasm_code, constant.rmode());
- } else {
- __ Call(wasm_code, constant.rmode());
- }
+ __ Call(wasm_code, constant.rmode());
}
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(i.InputRegister(0));
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -1107,12 +1215,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ jmp(kScratchRegister);
}
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(i.InputRegister(0));
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -1130,11 +1233,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -1147,11 +1246,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -1344,7 +1439,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movl(result, result);
break;
}
- case kArchStoreWithWriteBarrier: {
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -1356,7 +1452,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
scratch0, scratch1, mode,
DetermineStubCallMode());
- __ StoreTaggedField(operand, value);
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
+ } else {
+ DCHECK_EQ(arch_opcode, kArchAtomicStoreWithWriteBarrier);
+ EmitTSANAwareStore<std::memory_order_seq_cst>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -1364,14 +1469,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
__ bind(ool->exit());
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kTaggedSize);
break;
}
- case kArchWordPoisonOnSpeculation:
- DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
- __ andq(i.InputRegister(0), kSpeculationPoisonRegister);
- break;
case kX64MFence:
__ mfence();
break;
@@ -1646,22 +1745,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// when there is a (v)mulss depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
- case kSSEFloat32Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ Pcmpeqd(tmp, tmp);
- __ Psrlq(tmp, byte{33});
- __ Andps(i.OutputDoubleRegister(), tmp);
- break;
- }
- case kSSEFloat32Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ Pcmpeqd(tmp, tmp);
- __ Psllq(tmp, byte{31});
- __ Xorps(i.OutputDoubleRegister(), tmp);
- break;
- }
case kSSEFloat32Sqrt:
ASSEMBLE_SSE_UNOP(sqrtss);
break;
@@ -1858,16 +1941,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
- case kX64F64x2Abs:
- case kSSEFloat64Abs: {
- __ Abspd(i.OutputDoubleRegister());
- break;
- }
- case kX64F64x2Neg:
- case kSSEFloat64Neg: {
- __ Negpd(i.OutputDoubleRegister());
- break;
- }
case kSSEFloat64Sqrt:
ASSEMBLE_SSE_UNOP(Sqrtsd);
break;
@@ -2120,56 +2193,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// when there is a (v)mulsd depending on the result.
__ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
- case kAVXFloat32Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsrlq(tmp, tmp, 33);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vandps(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vandps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64Float32Abs: {
+ __ Absps(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
- case kAVXFloat32Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsllq(tmp, tmp, 31);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vxorps(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vxorps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64Float32Neg: {
+ __ Negps(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
- case kAVXFloat64Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsrlq(tmp, tmp, 1);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vandpd(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vandpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64F64x2Abs:
+ case kX64Float64Abs: {
+ __ Abspd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
- case kAVXFloat64Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsllq(tmp, tmp, 63);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64F64x2Neg:
+ case kX64Float64Neg: {
+ __ Negpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
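
The Absps/Abspd and Negps/Negpd helpers that replace the open-coded sequences rely on the same IEEE-754 identity the deleted code used: absolute value clears the sign bit and negation flips it, so a single andps/andpd or xorps/xorpd against a sign-bit mask per lane is enough. A scalar sketch of the same bit trick:

#include <cassert>
#include <cstdint>
#include <cstring>

// Scalar illustration of the sign-bit masking behind the SIMD abs/neg
// helpers: abs clears bit 63 of a double, negation flips it.
double Float64Abs(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits &= ~(uint64_t{1} << 63);  // andpd with 0x7fff... per lane
  std::memcpy(&x, &bits, sizeof x);
  return x;
}

double Float64Neg(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits ^= uint64_t{1} << 63;     // xorpd with 0x8000... per lane
  std::memcpy(&x, &bits, sizeof x);
  return x;
}

int main() {
  assert(Float64Abs(-2.5) == 2.5);
  assert(Float64Neg(2.5) == -2.5);
  return 0;
}
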
case kSSEFloat64SilenceNaN:
@@ -2180,24 +2219,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxbq);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxbq);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movb: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2205,29 +2240,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt8(index)));
- __ movb(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt8Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord8);
} else {
Register value(i.InputRegister(index));
- __ movb(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt8Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord8);
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64Movsxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxwq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2237,7 +2269,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxwq);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movw: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2245,16 +2276,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt16(index)));
- __ movw(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt16Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord16);
} else {
Register value(i.InputRegister(index));
- __ movw(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt16Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord16);
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64Movl:
@@ -2263,8 +2293,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasAddressingMode(instr)) {
Operand address(i.MemoryOperand());
__ movl(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kInt32Size);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kInt32Size);
} else {
if (HasRegisterInput(instr, 0)) {
__ movl(i.OutputRegister(), i.InputRegister(0));
@@ -2278,48 +2308,43 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ movl(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt32Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord32);
} else {
Register value(i.InputRegister(index));
- __ movl(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt32Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord32);
}
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxlq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxlq);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64MovqDecompressTaggedSigned: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressTaggedSigned(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kTaggedSize);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqDecompressTaggedPointer: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressTaggedPointer(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kTaggedSize);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqDecompressAnyTagged: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressAnyTagged(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kTaggedSize);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqCompressTagged: {
@@ -2328,14 +2353,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ StoreTaggedField(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kTaggedSize);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
} else {
Register value(i.InputRegister(index));
- __ StoreTaggedField(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kTaggedSize);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
}
break;
}
@@ -2344,24 +2369,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->HasOutput()) {
Operand address(i.MemoryOperand());
__ movq(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kInt64Size);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kInt64Size);
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ movq(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt64Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord64);
} else {
Register value(i.InputRegister(index));
- __ movq(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt64Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord64);
}
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movss:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2376,17 +2400,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Movsd: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- // If we have to poison the loaded value, we load into a general
- // purpose register first, mask it with the poison, and move the
- // value from the general purpose register into the double register.
- __ movq(kScratchRegister, i.MemoryOperand());
- __ andq(kScratchRegister, kSpeculationPoisonRegister);
- __ Movq(i.OutputDoubleRegister(), kScratchRegister);
- } else {
- __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
- }
+ __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
@@ -2667,27 +2681,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2Qfma: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movapd(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulpd(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Addpd(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F64x2Qfma(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F64x2Qfms: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfnmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movapd(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulpd(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Subpd(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F64x2Qfms(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F64x2ConvertLowI32x4S: {
@@ -2696,7 +2698,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64F64x2ConvertLowI32x4U: {
__ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchRegister);
break;
}
case kX64F64x2PromoteLowF32x4: {
@@ -2709,12 +2711,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4TruncSatF64x2SZero: {
__ I32x4TruncSatF64x2SZero(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64I32x4TruncSatF64x2UZero: {
__ I32x4TruncSatF64x2UZero(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64F32x4Splat: {
@@ -2868,27 +2872,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F32x4Qfma: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movaps(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulps(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Addps(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F32x4Qfma(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F32x4Qfms: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfnmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movaps(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulps(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Subps(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F32x4Qfms(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F32x4Pmin: {
@@ -3084,21 +3076,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4SConvertF32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister dst = i.OutputSimd128Register();
- // NAN->0
- __ Movaps(kScratchDoubleReg, dst);
- __ Cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
- __ Pand(dst, kScratchDoubleReg);
- // Set top bit if >= 0 (but not -0.0!)
- __ Pxor(kScratchDoubleReg, dst);
- // Convert
- __ Cvttps2dq(dst, dst);
- // Set top bit if >=0 is now < 0
- __ Pand(kScratchDoubleReg, dst);
- __ Psrad(kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF
- __ Pxor(dst, kScratchDoubleReg);
+ __ I32x4SConvertF32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64I32x4SConvertI16x8Low: {
@@ -3252,21 +3232,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4ExtAddPairwiseI16x8S: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src1 = i.InputSimd128Register(0);
- // pmaddwd multiplies signed words in src1 and src2, producing signed
- // doublewords, then adds pairwise.
- // src1 = |a|b|c|d|e|f|g|h|
- // src2 = |1|1|1|1|1|1|1|1|
- // dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
- Operand src2 = __ ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i16x8_splat_0x0001());
- __ Pmaddwd(dst, src1, src2);
+ __ I32x4ExtAddPairwiseI16x8S(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchRegister);
break;
}
case kX64I32x4ExtAddPairwiseI16x8U: {
__ I32x4ExtAddPairwiseI16x8U(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0),
+ kScratchDoubleReg);
break;
}
case kX64S128Const: {
@@ -3293,12 +3266,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64I16x8Splat: {
XMMRegister dst = i.OutputSimd128Register();
if (HasRegisterInput(instr, 0)) {
- __ Movd(dst, i.InputRegister(0));
+ __ I16x8Splat(dst, i.InputRegister(0));
} else {
- __ Movd(dst, i.InputOperand(0));
+ __ I16x8Splat(dst, i.InputOperand(0));
}
- __ Pshuflw(dst, dst, uint8_t{0x0});
- __ Pshufd(dst, dst, uint8_t{0x0});
break;
}
case kX64I16x8ExtractLaneS: {
@@ -3481,43 +3452,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8ExtAddPairwiseI8x16S: {
__ I16x8ExtAddPairwiseI8x16S(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64I16x8ExtAddPairwiseI8x16U: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src1 = i.InputSimd128Register(0);
- Operand src2 = __ ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01());
- __ Pmaddubsw(dst, src1, src2);
+ __ I16x8ExtAddPairwiseI8x16U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchRegister);
break;
}
case kX64I16x8Q15MulRSatS: {
__ I16x8Q15MulRSatS(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64I8x16Splat: {
XMMRegister dst = i.OutputSimd128Register();
- if (CpuFeatures::IsSupported(AVX2)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- CpuFeatureScope avx2_scope(tasm(), AVX2);
- if (HasRegisterInput(instr, 0)) {
- __ vmovd(kScratchDoubleReg, i.InputRegister(0));
- __ vpbroadcastb(dst, kScratchDoubleReg);
- } else {
- __ vpbroadcastb(dst, i.InputOperand(0));
- }
+ if (HasRegisterInput(instr, 0)) {
+ __ I8x16Splat(dst, i.InputRegister(0), kScratchDoubleReg);
} else {
- if (HasRegisterInput(instr, 0)) {
- __ Movd(dst, i.InputRegister(0));
- } else {
- __ Movd(dst, i.InputOperand(0));
- }
- __ Xorps(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(dst, kScratchDoubleReg);
+ __ I8x16Splat(dst, i.InputOperand(0), kScratchDoubleReg);
}
-
break;
}
case kX64Pextrb: {
@@ -3586,66 +3541,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16Shl: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // Temp registers for shift mask and additional moves to XMM registers.
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away low bits.
- uint8_t shift = i.InputInt3(1);
- __ Psllw(dst, byte{shift});
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ movl(tmp, Immediate(mask));
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16Shl(dst, src, i.InputInt3(1), kScratchRegister,
+ kScratchDoubleReg);
} else {
- // Mask off the unwanted bits before word-shifting.
- __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- // Take shift value modulo 8.
- __ movq(tmp, i.InputRegister(1));
- __ andq(tmp, Immediate(7));
- __ addq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, tmp_simd);
- __ Packuswb(kScratchDoubleReg, kScratchDoubleReg);
- __ Pand(dst, kScratchDoubleReg);
- // TODO(zhin): subq here to avoid asking for another temporary register,
- // examine codegen for other i8x16 shifts, they use less instructions.
- __ subq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psllw(dst, tmp_simd);
+ __ I8x16Shl(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
}
break;
}
case kX64I8x16ShrS: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
if (HasImmediateInput(instr, 1)) {
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- uint8_t shift = i.InputInt3(1) + 8;
- __ Psraw(kScratchDoubleReg, shift);
- __ Psraw(dst, shift);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputInt3(1), kScratchDoubleReg);
} else {
-        // Temp registers for shift mask and additional moves to XMM registers.
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
- // Unpack the bytes into words, do arithmetic shifts, and repack.
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- // Prepare shift value
- __ movq(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ andq(tmp, Immediate(7));
- __ addq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psraw(kScratchDoubleReg, tmp_simd);
- __ Psraw(dst, tmp_simd);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
}
break;
}
@@ -3701,34 +3616,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16ShrU: {
XMMRegister dst = i.OutputSimd128Register();
- // Unpack the bytes into words, do logical shifts, and repack.
- DCHECK_EQ(dst, i.InputSimd128Register(0));
-      // Temp registers for shift mask and additional moves to XMM registers.
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = i.InputInt3(1);
- __ Psrlw(dst, byte{shift});
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ movl(tmp, Immediate(mask));
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, byte{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16ShrU(dst, src, i.InputInt3(1), kScratchRegister,
+ kScratchDoubleReg);
} else {
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- // Prepare shift value
- __ movq(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ andq(tmp, Immediate(7));
- __ addq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, tmp_simd);
- __ Psrlw(dst, tmp_simd);
- __ Packuswb(dst, kScratchDoubleReg);
+ __ I8x16ShrU(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
}
break;
}
@@ -3834,9 +3729,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16Swizzle: {
- bool omit_add = MiscField::decode(instr->opcode());
__ I8x16Swizzle(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), omit_add);
+ i.InputSimd128Register(1), kScratchDoubleReg,
+ kScratchRegister, MiscField::decode(instr->opcode()));
break;
}
case kX64I8x16Shuffle: {
@@ -3888,45 +3783,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16Popcnt: {
__ I8x16Popcnt(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.TempSimd128Register(0));
+ i.TempSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64S128Load8Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- XMMRegister dst = i.OutputSimd128Register();
- if (CpuFeatures::IsSupported(AVX2)) {
- CpuFeatureScope avx2_scope(tasm(), AVX2);
- __ vpbroadcastb(dst, i.MemoryOperand());
- } else {
- __ Pinsrb(dst, dst, i.MemoryOperand(), 0);
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(dst, kScratchDoubleReg);
- }
+ __ S128Load8Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kX64S128Load16Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- XMMRegister dst = i.OutputSimd128Register();
- if (CpuFeatures::IsSupported(AVX2)) {
- CpuFeatureScope avx2_scope(tasm(), AVX2);
- __ vpbroadcastw(dst, i.MemoryOperand());
- } else {
- __ Pinsrw(dst, dst, i.MemoryOperand(), 0);
- __ Pshuflw(dst, dst, uint8_t{0});
- __ Punpcklqdq(dst, dst);
- }
+ __ S128Load16Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kX64S128Load32Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vbroadcastss(i.OutputSimd128Register(), i.MemoryOperand());
- } else {
- __ movss(i.OutputSimd128Register(), i.MemoryOperand());
- __ shufps(i.OutputSimd128Register(), i.OutputSimd128Register(),
- byte{0});
- }
+ __ S128Load32Splat(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kX64S128Load64Splat: {
@@ -4049,10 +3924,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
ASSEMBLE_SIMD_IMM_INSTR(Pshuflw, dst, 0, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
ASSEMBLE_SIMD_IMM_INSTR(Pshufhw, dst, 0, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -4070,10 +3945,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
__ Pshufhw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -4232,156 +4107,180 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqb);
break;
}
- case kWord32AtomicExchangeInt8: {
+ case kAtomicStoreWord8: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord8);
+ break;
+ }
+ case kAtomicStoreWord16: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord16);
+ break;
+ }
+ case kAtomicStoreWord32: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord32);
+ break;
+ }
+ case kX64Word64AtomicStoreWord64: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord64);
+ break;
+ }
+ case kAtomicExchangeInt8: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
__ movsxbl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint8: {
+ case kAtomicExchangeUint8: {
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxbl(i.InputRegister(0), i.InputRegister(0));
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxbl(i.InputRegister(0), i.InputRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxbq(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
break;
}
- case kWord32AtomicExchangeInt16: {
+ case kAtomicExchangeInt16: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ xchgw(i.InputRegister(0), i.MemoryOperand(1));
__ movsxwl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint16: {
+ case kAtomicExchangeUint16: {
__ xchgw(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxwl(i.InputRegister(0), i.InputRegister(0));
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxwl(i.InputRegister(0), i.InputRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxwq(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
break;
}
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
__ xchgl(i.InputRegister(0), i.MemoryOperand(1));
break;
}
- case kWord32AtomicCompareExchangeInt8: {
+ case kAtomicCompareExchangeInt8: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
__ movsxbl(rax, rax);
break;
}
- case kWord32AtomicCompareExchangeUint8: {
+ case kAtomicCompareExchangeUint8: {
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxbl(rax, rax);
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxbl(rax, rax);
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxbq(rax, rax);
+ break;
+ }
break;
}
- case kWord32AtomicCompareExchangeInt16: {
+ case kAtomicCompareExchangeInt16: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
__ movsxwl(rax, rax);
break;
}
- case kWord32AtomicCompareExchangeUint16: {
+ case kAtomicCompareExchangeUint16: {
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxwl(rax, rax);
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxwl(rax, rax);
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxwq(rax, rax);
+ break;
+ }
break;
}
- case kWord32AtomicCompareExchangeWord32: {
+ case kAtomicCompareExchangeWord32: {
__ lock();
__ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
- break;
- }
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
- __ movsxbl(rax, rax); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
- __ movzxbl(rax, rax); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
- __ movsxwl(rax, rax); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
- __ movzxwl(rax, rax); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(inst, movl, cmpxchgl); \
- break;
- ATOMIC_BINOP_CASE(Add, addl)
- ATOMIC_BINOP_CASE(Sub, subl)
- ATOMIC_BINOP_CASE(And, andl)
- ATOMIC_BINOP_CASE(Or, orl)
- ATOMIC_BINOP_CASE(Xor, xorl)
-#undef ATOMIC_BINOP_CASE
- case kX64Word64AtomicExchangeUint8: {
- __ xchgb(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxbq(i.InputRegister(0), i.InputRegister(0));
- break;
- }
- case kX64Word64AtomicExchangeUint16: {
- __ xchgw(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxwq(i.InputRegister(0), i.InputRegister(0));
- break;
- }
- case kX64Word64AtomicExchangeUint32: {
- __ xchgl(i.InputRegister(0), i.MemoryOperand(1));
+ if (AtomicWidthField::decode(opcode) == AtomicWidth::kWord64) {
+ // Zero-extend the 32 bit value to 64 bit.
+ __ movl(rax, rax);
+ }
break;
}
case kX64Word64AtomicExchangeUint64: {
__ xchgq(i.InputRegister(0), i.MemoryOperand(1));
break;
}
- case kX64Word64AtomicCompareExchangeUint8: {
- __ lock();
- __ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxbq(rax, rax);
- break;
- }
- case kX64Word64AtomicCompareExchangeUint16: {
- __ lock();
- __ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxwq(rax, rax);
- break;
- }
- case kX64Word64AtomicCompareExchangeUint32: {
- __ lock();
- __ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
- // Zero-extend the 32 bit value to 64 bit.
- __ movl(rax, rax);
- break;
- }
case kX64Word64AtomicCompareExchangeUint64: {
__ lock();
__ cmpxchgq(i.MemoryOperand(2), i.InputRegister(1));
break;
}
-#define ATOMIC64_BINOP_CASE(op, inst) \
- case kX64Word64Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movb, cmpxchgb); \
- __ movzxbq(rax, rax); \
- break; \
- case kX64Word64Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movw, cmpxchgw); \
- __ movzxwq(rax, rax); \
- break; \
- case kX64Word64Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movl, cmpxchgl); \
- break; \
- case kX64Word64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movq, cmpxchgq); \
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movb, cmpxchgb); \
+ __ movsxbl(rax, rax); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movb, cmpxchgb); \
+ __ movzxbl(rax, rax); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movb, cmpxchgb); \
+ __ movzxbq(rax, rax); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movw, cmpxchgw); \
+ __ movsxwl(rax, rax); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movw, cmpxchgw); \
+ __ movzxwl(rax, rax); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movw, cmpxchgw); \
+ __ movzxwq(rax, rax); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Word32: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movl, cmpxchgl); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movl, cmpxchgl); \
+ break; \
+ } \
+ break; \
+ case kX64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movq, cmpxchgq); \
break;
- ATOMIC64_BINOP_CASE(Add, addq)
- ATOMIC64_BINOP_CASE(Sub, subq)
- ATOMIC64_BINOP_CASE(And, andq)
- ATOMIC64_BINOP_CASE(Or, orq)
- ATOMIC64_BINOP_CASE(Xor, xorq)
-#undef ATOMIC64_BINOP_CASE
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ ATOMIC_BINOP_CASE(Add, addl, addq)
+ ATOMIC_BINOP_CASE(Sub, subl, subq)
+ ATOMIC_BINOP_CASE(And, andl, andq)
+ ATOMIC_BINOP_CASE(Or, orl, orq)
+ ATOMIC_BINOP_CASE(Xor, xorl, xorq)
+#undef ATOMIC_BINOP_CASE
+
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
}
return kSuccess;
@@ -4407,6 +4306,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
#undef ASSEMBLE_SIMD_ALL_TRUE
#undef ASSEMBLE_SIMD_SHIFT
+#undef ASSEMBLE_SEQ_CST_STORE
namespace {
@@ -4462,19 +4362,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ Move(kScratchRegister, 0);
- __ cmovq(FlagsConditionToCondition(condition), kSpeculationPoisonRegister,
- kScratchRegister);
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
Label::Distance flabel_distance =
@@ -4716,7 +4603,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= static_cast<int>(osr_helper()->UnoptimizedFrameSlots());
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4876,18 +4762,24 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// The number of arguments without the receiver is
// max(argc_reg, parameter_slots-1), and the receiver is added in
// DropArguments().
- int parameter_slots_without_receiver = parameter_slots - 1;
Label mismatch_return;
Register scratch_reg = r10;
DCHECK_NE(argc_reg, scratch_reg);
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
- __ cmpq(argc_reg, Immediate(parameter_slots_without_receiver));
+ if (kJSArgcIncludesReceiver) {
+ __ cmpq(argc_reg, Immediate(parameter_slots));
+ } else {
+ int parameter_slots_without_receiver = parameter_slots - 1;
+ __ cmpq(argc_reg, Immediate(parameter_slots_without_receiver));
+ }
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
__ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// We use a return instead of a jump for better return address prediction.
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index eba23dcfa9..e7fe45c5de 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -11,413 +11,389 @@ namespace compiler {
// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(X64Add) \
- V(X64Add32) \
- V(X64And) \
- V(X64And32) \
- V(X64Cmp) \
- V(X64Cmp32) \
- V(X64Cmp16) \
- V(X64Cmp8) \
- V(X64Test) \
- V(X64Test32) \
- V(X64Test16) \
- V(X64Test8) \
- V(X64Or) \
- V(X64Or32) \
- V(X64Xor) \
- V(X64Xor32) \
- V(X64Sub) \
- V(X64Sub32) \
- V(X64Imul) \
- V(X64Imul32) \
- V(X64ImulHigh32) \
- V(X64UmulHigh32) \
- V(X64Idiv) \
- V(X64Idiv32) \
- V(X64Udiv) \
- V(X64Udiv32) \
- V(X64Not) \
- V(X64Not32) \
- V(X64Neg) \
- V(X64Neg32) \
- V(X64Shl) \
- V(X64Shl32) \
- V(X64Shr) \
- V(X64Shr32) \
- V(X64Sar) \
- V(X64Sar32) \
- V(X64Rol) \
- V(X64Rol32) \
- V(X64Ror) \
- V(X64Ror32) \
- V(X64Lzcnt) \
- V(X64Lzcnt32) \
- V(X64Tzcnt) \
- V(X64Tzcnt32) \
- V(X64Popcnt) \
- V(X64Popcnt32) \
- V(X64Bswap) \
- V(X64Bswap32) \
- V(X64MFence) \
- V(X64LFence) \
- V(SSEFloat32Cmp) \
- V(SSEFloat32Add) \
- V(SSEFloat32Sub) \
- V(SSEFloat32Mul) \
- V(SSEFloat32Div) \
- V(SSEFloat32Abs) \
- V(SSEFloat32Neg) \
- V(SSEFloat32Sqrt) \
- V(SSEFloat32ToFloat64) \
- V(SSEFloat32ToInt32) \
- V(SSEFloat32ToUint32) \
- V(SSEFloat32Round) \
- V(SSEFloat64Cmp) \
- V(SSEFloat64Add) \
- V(SSEFloat64Sub) \
- V(SSEFloat64Mul) \
- V(SSEFloat64Div) \
- V(SSEFloat64Mod) \
- V(SSEFloat64Abs) \
- V(SSEFloat64Neg) \
- V(SSEFloat64Sqrt) \
- V(SSEFloat64Round) \
- V(SSEFloat32Max) \
- V(SSEFloat64Max) \
- V(SSEFloat32Min) \
- V(SSEFloat64Min) \
- V(SSEFloat64ToFloat32) \
- V(SSEFloat64ToInt32) \
- V(SSEFloat64ToUint32) \
- V(SSEFloat32ToInt64) \
- V(SSEFloat64ToInt64) \
- V(SSEFloat32ToUint64) \
- V(SSEFloat64ToUint64) \
- V(SSEInt32ToFloat64) \
- V(SSEInt32ToFloat32) \
- V(SSEInt64ToFloat32) \
- V(SSEInt64ToFloat64) \
- V(SSEUint64ToFloat32) \
- V(SSEUint64ToFloat64) \
- V(SSEUint32ToFloat64) \
- V(SSEUint32ToFloat32) \
- V(SSEFloat64ExtractLowWord32) \
- V(SSEFloat64ExtractHighWord32) \
- V(SSEFloat64InsertLowWord32) \
- V(SSEFloat64InsertHighWord32) \
- V(SSEFloat64LoadLowWord32) \
- V(SSEFloat64SilenceNaN) \
- V(AVXFloat32Cmp) \
- V(AVXFloat32Add) \
- V(AVXFloat32Sub) \
- V(AVXFloat32Mul) \
- V(AVXFloat32Div) \
- V(AVXFloat64Cmp) \
- V(AVXFloat64Add) \
- V(AVXFloat64Sub) \
- V(AVXFloat64Mul) \
- V(AVXFloat64Div) \
- V(AVXFloat64Abs) \
- V(AVXFloat64Neg) \
- V(AVXFloat32Abs) \
- V(AVXFloat32Neg) \
- V(X64Movsxbl) \
- V(X64Movzxbl) \
- V(X64Movsxbq) \
- V(X64Movzxbq) \
- V(X64Movb) \
- V(X64Movsxwl) \
- V(X64Movzxwl) \
- V(X64Movsxwq) \
- V(X64Movzxwq) \
- V(X64Movw) \
- V(X64Movl) \
- V(X64Movsxlq) \
- V(X64MovqDecompressTaggedSigned) \
- V(X64MovqDecompressTaggedPointer) \
- V(X64MovqDecompressAnyTagged) \
- V(X64MovqCompressTagged) \
- V(X64Movq) \
- V(X64Movsd) \
- V(X64Movss) \
- V(X64Movdqu) \
- V(X64BitcastFI) \
- V(X64BitcastDL) \
- V(X64BitcastIF) \
- V(X64BitcastLD) \
- V(X64Lea32) \
- V(X64Lea) \
- V(X64Dec32) \
- V(X64Inc32) \
- V(X64Push) \
- V(X64Poke) \
- V(X64Peek) \
- V(X64F64x2Splat) \
- V(X64F64x2ExtractLane) \
- V(X64F64x2ReplaceLane) \
- V(X64F64x2Abs) \
- V(X64F64x2Neg) \
- V(X64F64x2Sqrt) \
- V(X64F64x2Add) \
- V(X64F64x2Sub) \
- V(X64F64x2Mul) \
- V(X64F64x2Div) \
- V(X64F64x2Min) \
- V(X64F64x2Max) \
- V(X64F64x2Eq) \
- V(X64F64x2Ne) \
- V(X64F64x2Lt) \
- V(X64F64x2Le) \
- V(X64F64x2Qfma) \
- V(X64F64x2Qfms) \
- V(X64F64x2Pmin) \
- V(X64F64x2Pmax) \
- V(X64F64x2Round) \
- V(X64F64x2ConvertLowI32x4S) \
- V(X64F64x2ConvertLowI32x4U) \
- V(X64F64x2PromoteLowF32x4) \
- V(X64F32x4Splat) \
- V(X64F32x4ExtractLane) \
- V(X64F32x4ReplaceLane) \
- V(X64F32x4SConvertI32x4) \
- V(X64F32x4UConvertI32x4) \
- V(X64F32x4Abs) \
- V(X64F32x4Neg) \
- V(X64F32x4Sqrt) \
- V(X64F32x4RecipApprox) \
- V(X64F32x4RecipSqrtApprox) \
- V(X64F32x4Add) \
- V(X64F32x4Sub) \
- V(X64F32x4Mul) \
- V(X64F32x4Div) \
- V(X64F32x4Min) \
- V(X64F32x4Max) \
- V(X64F32x4Eq) \
- V(X64F32x4Ne) \
- V(X64F32x4Lt) \
- V(X64F32x4Le) \
- V(X64F32x4Qfma) \
- V(X64F32x4Qfms) \
- V(X64F32x4Pmin) \
- V(X64F32x4Pmax) \
- V(X64F32x4Round) \
- V(X64F32x4DemoteF64x2Zero) \
- V(X64I64x2Splat) \
- V(X64I64x2ExtractLane) \
- V(X64I64x2Abs) \
- V(X64I64x2Neg) \
- V(X64I64x2BitMask) \
- V(X64I64x2Shl) \
- V(X64I64x2ShrS) \
- V(X64I64x2Add) \
- V(X64I64x2Sub) \
- V(X64I64x2Mul) \
- V(X64I64x2Eq) \
- V(X64I64x2GtS) \
- V(X64I64x2GeS) \
- V(X64I64x2Ne) \
- V(X64I64x2ShrU) \
- V(X64I64x2ExtMulLowI32x4S) \
- V(X64I64x2ExtMulHighI32x4S) \
- V(X64I64x2ExtMulLowI32x4U) \
- V(X64I64x2ExtMulHighI32x4U) \
- V(X64I64x2SConvertI32x4Low) \
- V(X64I64x2SConvertI32x4High) \
- V(X64I64x2UConvertI32x4Low) \
- V(X64I64x2UConvertI32x4High) \
- V(X64I32x4Splat) \
- V(X64I32x4ExtractLane) \
- V(X64I32x4SConvertF32x4) \
- V(X64I32x4SConvertI16x8Low) \
- V(X64I32x4SConvertI16x8High) \
- V(X64I32x4Neg) \
- V(X64I32x4Shl) \
- V(X64I32x4ShrS) \
- V(X64I32x4Add) \
- V(X64I32x4Sub) \
- V(X64I32x4Mul) \
- V(X64I32x4MinS) \
- V(X64I32x4MaxS) \
- V(X64I32x4Eq) \
- V(X64I32x4Ne) \
- V(X64I32x4GtS) \
- V(X64I32x4GeS) \
- V(X64I32x4UConvertF32x4) \
- V(X64I32x4UConvertI16x8Low) \
- V(X64I32x4UConvertI16x8High) \
- V(X64I32x4ShrU) \
- V(X64I32x4MinU) \
- V(X64I32x4MaxU) \
- V(X64I32x4GtU) \
- V(X64I32x4GeU) \
- V(X64I32x4Abs) \
- V(X64I32x4BitMask) \
- V(X64I32x4DotI16x8S) \
- V(X64I32x4ExtMulLowI16x8S) \
- V(X64I32x4ExtMulHighI16x8S) \
- V(X64I32x4ExtMulLowI16x8U) \
- V(X64I32x4ExtMulHighI16x8U) \
- V(X64I32x4ExtAddPairwiseI16x8S) \
- V(X64I32x4ExtAddPairwiseI16x8U) \
- V(X64I32x4TruncSatF64x2SZero) \
- V(X64I32x4TruncSatF64x2UZero) \
- V(X64I16x8Splat) \
- V(X64I16x8ExtractLaneS) \
- V(X64I16x8SConvertI8x16Low) \
- V(X64I16x8SConvertI8x16High) \
- V(X64I16x8Neg) \
- V(X64I16x8Shl) \
- V(X64I16x8ShrS) \
- V(X64I16x8SConvertI32x4) \
- V(X64I16x8Add) \
- V(X64I16x8AddSatS) \
- V(X64I16x8Sub) \
- V(X64I16x8SubSatS) \
- V(X64I16x8Mul) \
- V(X64I16x8MinS) \
- V(X64I16x8MaxS) \
- V(X64I16x8Eq) \
- V(X64I16x8Ne) \
- V(X64I16x8GtS) \
- V(X64I16x8GeS) \
- V(X64I16x8UConvertI8x16Low) \
- V(X64I16x8UConvertI8x16High) \
- V(X64I16x8ShrU) \
- V(X64I16x8UConvertI32x4) \
- V(X64I16x8AddSatU) \
- V(X64I16x8SubSatU) \
- V(X64I16x8MinU) \
- V(X64I16x8MaxU) \
- V(X64I16x8GtU) \
- V(X64I16x8GeU) \
- V(X64I16x8RoundingAverageU) \
- V(X64I16x8Abs) \
- V(X64I16x8BitMask) \
- V(X64I16x8ExtMulLowI8x16S) \
- V(X64I16x8ExtMulHighI8x16S) \
- V(X64I16x8ExtMulLowI8x16U) \
- V(X64I16x8ExtMulHighI8x16U) \
- V(X64I16x8ExtAddPairwiseI8x16S) \
- V(X64I16x8ExtAddPairwiseI8x16U) \
- V(X64I16x8Q15MulRSatS) \
- V(X64I8x16Splat) \
- V(X64I8x16ExtractLaneS) \
- V(X64Pinsrb) \
- V(X64Pinsrw) \
- V(X64Pinsrd) \
- V(X64Pinsrq) \
- V(X64Pextrb) \
- V(X64Pextrw) \
- V(X64I8x16SConvertI16x8) \
- V(X64I8x16Neg) \
- V(X64I8x16Shl) \
- V(X64I8x16ShrS) \
- V(X64I8x16Add) \
- V(X64I8x16AddSatS) \
- V(X64I8x16Sub) \
- V(X64I8x16SubSatS) \
- V(X64I8x16MinS) \
- V(X64I8x16MaxS) \
- V(X64I8x16Eq) \
- V(X64I8x16Ne) \
- V(X64I8x16GtS) \
- V(X64I8x16GeS) \
- V(X64I8x16UConvertI16x8) \
- V(X64I8x16AddSatU) \
- V(X64I8x16SubSatU) \
- V(X64I8x16ShrU) \
- V(X64I8x16MinU) \
- V(X64I8x16MaxU) \
- V(X64I8x16GtU) \
- V(X64I8x16GeU) \
- V(X64I8x16RoundingAverageU) \
- V(X64I8x16Abs) \
- V(X64I8x16BitMask) \
- V(X64S128Const) \
- V(X64S128Zero) \
- V(X64S128AllOnes) \
- V(X64S128Not) \
- V(X64S128And) \
- V(X64S128Or) \
- V(X64S128Xor) \
- V(X64S128Select) \
- V(X64S128AndNot) \
- V(X64I8x16Swizzle) \
- V(X64I8x16Shuffle) \
- V(X64I8x16Popcnt) \
- V(X64S128Load8Splat) \
- V(X64S128Load16Splat) \
- V(X64S128Load32Splat) \
- V(X64S128Load64Splat) \
- V(X64S128Load8x8S) \
- V(X64S128Load8x8U) \
- V(X64S128Load16x4S) \
- V(X64S128Load16x4U) \
- V(X64S128Load32x2S) \
- V(X64S128Load32x2U) \
- V(X64S128Store32Lane) \
- V(X64S128Store64Lane) \
- V(X64Shufps) \
- V(X64S32x4Rotate) \
- V(X64S32x4Swizzle) \
- V(X64S32x4Shuffle) \
- V(X64S16x8Blend) \
- V(X64S16x8HalfShuffle1) \
- V(X64S16x8HalfShuffle2) \
- V(X64S8x16Alignr) \
- V(X64S16x8Dup) \
- V(X64S8x16Dup) \
- V(X64S16x8UnzipHigh) \
- V(X64S16x8UnzipLow) \
- V(X64S8x16UnzipHigh) \
- V(X64S8x16UnzipLow) \
- V(X64S64x2UnpackHigh) \
- V(X64S32x4UnpackHigh) \
- V(X64S16x8UnpackHigh) \
- V(X64S8x16UnpackHigh) \
- V(X64S64x2UnpackLow) \
- V(X64S32x4UnpackLow) \
- V(X64S16x8UnpackLow) \
- V(X64S8x16UnpackLow) \
- V(X64S8x16TransposeLow) \
- V(X64S8x16TransposeHigh) \
- V(X64S8x8Reverse) \
- V(X64S8x4Reverse) \
- V(X64S8x2Reverse) \
- V(X64V128AnyTrue) \
- V(X64I64x2AllTrue) \
- V(X64I32x4AllTrue) \
- V(X64I16x8AllTrue) \
- V(X64I8x16AllTrue) \
- V(X64Word64AtomicAddUint8) \
- V(X64Word64AtomicAddUint16) \
- V(X64Word64AtomicAddUint32) \
- V(X64Word64AtomicAddUint64) \
- V(X64Word64AtomicSubUint8) \
- V(X64Word64AtomicSubUint16) \
- V(X64Word64AtomicSubUint32) \
- V(X64Word64AtomicSubUint64) \
- V(X64Word64AtomicAndUint8) \
- V(X64Word64AtomicAndUint16) \
- V(X64Word64AtomicAndUint32) \
- V(X64Word64AtomicAndUint64) \
- V(X64Word64AtomicOrUint8) \
- V(X64Word64AtomicOrUint16) \
- V(X64Word64AtomicOrUint32) \
- V(X64Word64AtomicOrUint64) \
- V(X64Word64AtomicXorUint8) \
- V(X64Word64AtomicXorUint16) \
- V(X64Word64AtomicXorUint32) \
- V(X64Word64AtomicXorUint64) \
- V(X64Word64AtomicExchangeUint8) \
- V(X64Word64AtomicExchangeUint16) \
- V(X64Word64AtomicExchangeUint32) \
- V(X64Word64AtomicExchangeUint64) \
- V(X64Word64AtomicCompareExchangeUint8) \
- V(X64Word64AtomicCompareExchangeUint16) \
- V(X64Word64AtomicCompareExchangeUint32) \
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(X64Add) \
+ V(X64Add32) \
+ V(X64And) \
+ V(X64And32) \
+ V(X64Cmp) \
+ V(X64Cmp32) \
+ V(X64Cmp16) \
+ V(X64Cmp8) \
+ V(X64Test) \
+ V(X64Test32) \
+ V(X64Test16) \
+ V(X64Test8) \
+ V(X64Or) \
+ V(X64Or32) \
+ V(X64Xor) \
+ V(X64Xor32) \
+ V(X64Sub) \
+ V(X64Sub32) \
+ V(X64Imul) \
+ V(X64Imul32) \
+ V(X64ImulHigh32) \
+ V(X64UmulHigh32) \
+ V(X64Idiv) \
+ V(X64Idiv32) \
+ V(X64Udiv) \
+ V(X64Udiv32) \
+ V(X64Not) \
+ V(X64Not32) \
+ V(X64Neg) \
+ V(X64Neg32) \
+ V(X64Shl) \
+ V(X64Shl32) \
+ V(X64Shr) \
+ V(X64Shr32) \
+ V(X64Sar) \
+ V(X64Sar32) \
+ V(X64Rol) \
+ V(X64Rol32) \
+ V(X64Ror) \
+ V(X64Ror32) \
+ V(X64Lzcnt) \
+ V(X64Lzcnt32) \
+ V(X64Tzcnt) \
+ V(X64Tzcnt32) \
+ V(X64Popcnt) \
+ V(X64Popcnt32) \
+ V(X64Bswap) \
+ V(X64Bswap32) \
+ V(X64MFence) \
+ V(X64LFence) \
+ V(SSEFloat32Cmp) \
+ V(SSEFloat32Add) \
+ V(SSEFloat32Sub) \
+ V(SSEFloat32Mul) \
+ V(SSEFloat32Div) \
+ V(SSEFloat32Sqrt) \
+ V(SSEFloat32ToFloat64) \
+ V(SSEFloat32ToInt32) \
+ V(SSEFloat32ToUint32) \
+ V(SSEFloat32Round) \
+ V(SSEFloat64Cmp) \
+ V(SSEFloat64Add) \
+ V(SSEFloat64Sub) \
+ V(SSEFloat64Mul) \
+ V(SSEFloat64Div) \
+ V(SSEFloat64Mod) \
+ V(SSEFloat64Sqrt) \
+ V(SSEFloat64Round) \
+ V(SSEFloat32Max) \
+ V(SSEFloat64Max) \
+ V(SSEFloat32Min) \
+ V(SSEFloat64Min) \
+ V(SSEFloat64ToFloat32) \
+ V(SSEFloat64ToInt32) \
+ V(SSEFloat64ToUint32) \
+ V(SSEFloat32ToInt64) \
+ V(SSEFloat64ToInt64) \
+ V(SSEFloat32ToUint64) \
+ V(SSEFloat64ToUint64) \
+ V(SSEInt32ToFloat64) \
+ V(SSEInt32ToFloat32) \
+ V(SSEInt64ToFloat32) \
+ V(SSEInt64ToFloat64) \
+ V(SSEUint64ToFloat32) \
+ V(SSEUint64ToFloat64) \
+ V(SSEUint32ToFloat64) \
+ V(SSEUint32ToFloat32) \
+ V(SSEFloat64ExtractLowWord32) \
+ V(SSEFloat64ExtractHighWord32) \
+ V(SSEFloat64InsertLowWord32) \
+ V(SSEFloat64InsertHighWord32) \
+ V(SSEFloat64LoadLowWord32) \
+ V(SSEFloat64SilenceNaN) \
+ V(AVXFloat32Cmp) \
+ V(AVXFloat32Add) \
+ V(AVXFloat32Sub) \
+ V(AVXFloat32Mul) \
+ V(AVXFloat32Div) \
+ V(AVXFloat64Cmp) \
+ V(AVXFloat64Add) \
+ V(AVXFloat64Sub) \
+ V(AVXFloat64Mul) \
+ V(AVXFloat64Div) \
+ V(X64Float64Abs) \
+ V(X64Float64Neg) \
+ V(X64Float32Abs) \
+ V(X64Float32Neg) \
+ V(X64Movsxbl) \
+ V(X64Movzxbl) \
+ V(X64Movsxbq) \
+ V(X64Movzxbq) \
+ V(X64Movb) \
+ V(X64Movsxwl) \
+ V(X64Movzxwl) \
+ V(X64Movsxwq) \
+ V(X64Movzxwq) \
+ V(X64Movw) \
+ V(X64Movl) \
+ V(X64Movsxlq) \
+ V(X64MovqDecompressTaggedSigned) \
+ V(X64MovqDecompressTaggedPointer) \
+ V(X64MovqDecompressAnyTagged) \
+ V(X64MovqCompressTagged) \
+ V(X64Movq) \
+ V(X64Movsd) \
+ V(X64Movss) \
+ V(X64Movdqu) \
+ V(X64BitcastFI) \
+ V(X64BitcastDL) \
+ V(X64BitcastIF) \
+ V(X64BitcastLD) \
+ V(X64Lea32) \
+ V(X64Lea) \
+ V(X64Dec32) \
+ V(X64Inc32) \
+ V(X64Push) \
+ V(X64Poke) \
+ V(X64Peek) \
+ V(X64F64x2Splat) \
+ V(X64F64x2ExtractLane) \
+ V(X64F64x2ReplaceLane) \
+ V(X64F64x2Abs) \
+ V(X64F64x2Neg) \
+ V(X64F64x2Sqrt) \
+ V(X64F64x2Add) \
+ V(X64F64x2Sub) \
+ V(X64F64x2Mul) \
+ V(X64F64x2Div) \
+ V(X64F64x2Min) \
+ V(X64F64x2Max) \
+ V(X64F64x2Eq) \
+ V(X64F64x2Ne) \
+ V(X64F64x2Lt) \
+ V(X64F64x2Le) \
+ V(X64F64x2Qfma) \
+ V(X64F64x2Qfms) \
+ V(X64F64x2Pmin) \
+ V(X64F64x2Pmax) \
+ V(X64F64x2Round) \
+ V(X64F64x2ConvertLowI32x4S) \
+ V(X64F64x2ConvertLowI32x4U) \
+ V(X64F64x2PromoteLowF32x4) \
+ V(X64F32x4Splat) \
+ V(X64F32x4ExtractLane) \
+ V(X64F32x4ReplaceLane) \
+ V(X64F32x4SConvertI32x4) \
+ V(X64F32x4UConvertI32x4) \
+ V(X64F32x4Abs) \
+ V(X64F32x4Neg) \
+ V(X64F32x4Sqrt) \
+ V(X64F32x4RecipApprox) \
+ V(X64F32x4RecipSqrtApprox) \
+ V(X64F32x4Add) \
+ V(X64F32x4Sub) \
+ V(X64F32x4Mul) \
+ V(X64F32x4Div) \
+ V(X64F32x4Min) \
+ V(X64F32x4Max) \
+ V(X64F32x4Eq) \
+ V(X64F32x4Ne) \
+ V(X64F32x4Lt) \
+ V(X64F32x4Le) \
+ V(X64F32x4Qfma) \
+ V(X64F32x4Qfms) \
+ V(X64F32x4Pmin) \
+ V(X64F32x4Pmax) \
+ V(X64F32x4Round) \
+ V(X64F32x4DemoteF64x2Zero) \
+ V(X64I64x2Splat) \
+ V(X64I64x2ExtractLane) \
+ V(X64I64x2Abs) \
+ V(X64I64x2Neg) \
+ V(X64I64x2BitMask) \
+ V(X64I64x2Shl) \
+ V(X64I64x2ShrS) \
+ V(X64I64x2Add) \
+ V(X64I64x2Sub) \
+ V(X64I64x2Mul) \
+ V(X64I64x2Eq) \
+ V(X64I64x2GtS) \
+ V(X64I64x2GeS) \
+ V(X64I64x2Ne) \
+ V(X64I64x2ShrU) \
+ V(X64I64x2ExtMulLowI32x4S) \
+ V(X64I64x2ExtMulHighI32x4S) \
+ V(X64I64x2ExtMulLowI32x4U) \
+ V(X64I64x2ExtMulHighI32x4U) \
+ V(X64I64x2SConvertI32x4Low) \
+ V(X64I64x2SConvertI32x4High) \
+ V(X64I64x2UConvertI32x4Low) \
+ V(X64I64x2UConvertI32x4High) \
+ V(X64I32x4Splat) \
+ V(X64I32x4ExtractLane) \
+ V(X64I32x4SConvertF32x4) \
+ V(X64I32x4SConvertI16x8Low) \
+ V(X64I32x4SConvertI16x8High) \
+ V(X64I32x4Neg) \
+ V(X64I32x4Shl) \
+ V(X64I32x4ShrS) \
+ V(X64I32x4Add) \
+ V(X64I32x4Sub) \
+ V(X64I32x4Mul) \
+ V(X64I32x4MinS) \
+ V(X64I32x4MaxS) \
+ V(X64I32x4Eq) \
+ V(X64I32x4Ne) \
+ V(X64I32x4GtS) \
+ V(X64I32x4GeS) \
+ V(X64I32x4UConvertF32x4) \
+ V(X64I32x4UConvertI16x8Low) \
+ V(X64I32x4UConvertI16x8High) \
+ V(X64I32x4ShrU) \
+ V(X64I32x4MinU) \
+ V(X64I32x4MaxU) \
+ V(X64I32x4GtU) \
+ V(X64I32x4GeU) \
+ V(X64I32x4Abs) \
+ V(X64I32x4BitMask) \
+ V(X64I32x4DotI16x8S) \
+ V(X64I32x4ExtMulLowI16x8S) \
+ V(X64I32x4ExtMulHighI16x8S) \
+ V(X64I32x4ExtMulLowI16x8U) \
+ V(X64I32x4ExtMulHighI16x8U) \
+ V(X64I32x4ExtAddPairwiseI16x8S) \
+ V(X64I32x4ExtAddPairwiseI16x8U) \
+ V(X64I32x4TruncSatF64x2SZero) \
+ V(X64I32x4TruncSatF64x2UZero) \
+ V(X64I16x8Splat) \
+ V(X64I16x8ExtractLaneS) \
+ V(X64I16x8SConvertI8x16Low) \
+ V(X64I16x8SConvertI8x16High) \
+ V(X64I16x8Neg) \
+ V(X64I16x8Shl) \
+ V(X64I16x8ShrS) \
+ V(X64I16x8SConvertI32x4) \
+ V(X64I16x8Add) \
+ V(X64I16x8AddSatS) \
+ V(X64I16x8Sub) \
+ V(X64I16x8SubSatS) \
+ V(X64I16x8Mul) \
+ V(X64I16x8MinS) \
+ V(X64I16x8MaxS) \
+ V(X64I16x8Eq) \
+ V(X64I16x8Ne) \
+ V(X64I16x8GtS) \
+ V(X64I16x8GeS) \
+ V(X64I16x8UConvertI8x16Low) \
+ V(X64I16x8UConvertI8x16High) \
+ V(X64I16x8ShrU) \
+ V(X64I16x8UConvertI32x4) \
+ V(X64I16x8AddSatU) \
+ V(X64I16x8SubSatU) \
+ V(X64I16x8MinU) \
+ V(X64I16x8MaxU) \
+ V(X64I16x8GtU) \
+ V(X64I16x8GeU) \
+ V(X64I16x8RoundingAverageU) \
+ V(X64I16x8Abs) \
+ V(X64I16x8BitMask) \
+ V(X64I16x8ExtMulLowI8x16S) \
+ V(X64I16x8ExtMulHighI8x16S) \
+ V(X64I16x8ExtMulLowI8x16U) \
+ V(X64I16x8ExtMulHighI8x16U) \
+ V(X64I16x8ExtAddPairwiseI8x16S) \
+ V(X64I16x8ExtAddPairwiseI8x16U) \
+ V(X64I16x8Q15MulRSatS) \
+ V(X64I8x16Splat) \
+ V(X64I8x16ExtractLaneS) \
+ V(X64Pinsrb) \
+ V(X64Pinsrw) \
+ V(X64Pinsrd) \
+ V(X64Pinsrq) \
+ V(X64Pextrb) \
+ V(X64Pextrw) \
+ V(X64I8x16SConvertI16x8) \
+ V(X64I8x16Neg) \
+ V(X64I8x16Shl) \
+ V(X64I8x16ShrS) \
+ V(X64I8x16Add) \
+ V(X64I8x16AddSatS) \
+ V(X64I8x16Sub) \
+ V(X64I8x16SubSatS) \
+ V(X64I8x16MinS) \
+ V(X64I8x16MaxS) \
+ V(X64I8x16Eq) \
+ V(X64I8x16Ne) \
+ V(X64I8x16GtS) \
+ V(X64I8x16GeS) \
+ V(X64I8x16UConvertI16x8) \
+ V(X64I8x16AddSatU) \
+ V(X64I8x16SubSatU) \
+ V(X64I8x16ShrU) \
+ V(X64I8x16MinU) \
+ V(X64I8x16MaxU) \
+ V(X64I8x16GtU) \
+ V(X64I8x16GeU) \
+ V(X64I8x16RoundingAverageU) \
+ V(X64I8x16Abs) \
+ V(X64I8x16BitMask) \
+ V(X64S128Const) \
+ V(X64S128Zero) \
+ V(X64S128AllOnes) \
+ V(X64S128Not) \
+ V(X64S128And) \
+ V(X64S128Or) \
+ V(X64S128Xor) \
+ V(X64S128Select) \
+ V(X64S128AndNot) \
+ V(X64I8x16Swizzle) \
+ V(X64I8x16Shuffle) \
+ V(X64I8x16Popcnt) \
+ V(X64S128Load8Splat) \
+ V(X64S128Load16Splat) \
+ V(X64S128Load32Splat) \
+ V(X64S128Load64Splat) \
+ V(X64S128Load8x8S) \
+ V(X64S128Load8x8U) \
+ V(X64S128Load16x4S) \
+ V(X64S128Load16x4U) \
+ V(X64S128Load32x2S) \
+ V(X64S128Load32x2U) \
+ V(X64S128Store32Lane) \
+ V(X64S128Store64Lane) \
+ V(X64Shufps) \
+ V(X64S32x4Rotate) \
+ V(X64S32x4Swizzle) \
+ V(X64S32x4Shuffle) \
+ V(X64S16x8Blend) \
+ V(X64S16x8HalfShuffle1) \
+ V(X64S16x8HalfShuffle2) \
+ V(X64S8x16Alignr) \
+ V(X64S16x8Dup) \
+ V(X64S8x16Dup) \
+ V(X64S16x8UnzipHigh) \
+ V(X64S16x8UnzipLow) \
+ V(X64S8x16UnzipHigh) \
+ V(X64S8x16UnzipLow) \
+ V(X64S64x2UnpackHigh) \
+ V(X64S32x4UnpackHigh) \
+ V(X64S16x8UnpackHigh) \
+ V(X64S8x16UnpackHigh) \
+ V(X64S64x2UnpackLow) \
+ V(X64S32x4UnpackLow) \
+ V(X64S16x8UnpackLow) \
+ V(X64S8x16UnpackLow) \
+ V(X64S8x16TransposeLow) \
+ V(X64S8x16TransposeHigh) \
+ V(X64S8x8Reverse) \
+ V(X64S8x4Reverse) \
+ V(X64S8x2Reverse) \
+ V(X64V128AnyTrue) \
+ V(X64I64x2AllTrue) \
+ V(X64I32x4AllTrue) \
+ V(X64I16x8AllTrue) \
+ V(X64I8x16AllTrue) \
+ V(X64Word64AtomicAddUint64) \
+ V(X64Word64AtomicSubUint64) \
+ V(X64Word64AtomicAndUint64) \
+ V(X64Word64AtomicOrUint64) \
+ V(X64Word64AtomicXorUint64) \
+ V(X64Word64AtomicStoreWord64) \
+ V(X64Word64AtomicExchangeUint64) \
V(X64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
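The TARGET_ARCH_OPCODE_LIST reflowed above is an X-macro: each V(...) entry is expanded by whatever macro the including file supplies, typically to declare the opcode enum and a matching name table. A minimal, self-contained sketch of that pattern, using a hypothetical three-entry list in place of the real one:

#include <cstdio>

// Hypothetical, pared-down stand-in for TARGET_ARCH_OPCODE_LIST.
#define DEMO_OPCODE_LIST(V) \
  V(X64Add)                 \
  V(X64Sub)                 \
  V(X64Word64AtomicStoreWord64)

// Expand the list once into an enum of kX64Add, kX64Sub, ...
#define DECLARE_OPCODE(Name) k##Name,
enum DemoArchOpcode { DEMO_OPCODE_LIST(DECLARE_OPCODE) kDemoOpcodeCount };
#undef DECLARE_OPCODE

// ...and a second time into a printable name table kept in sync for free.
#define OPCODE_NAME(Name) #Name,
static const char* const kDemoOpcodeNames[] = {DEMO_OPCODE_LIST(OPCODE_NAME)};
#undef OPCODE_NAME

int main() {
  for (int i = 0; i < kDemoOpcodeCount; ++i) {
    std::printf("%d -> %s\n", i, kDemoOpcodeNames[i]);
  }
  return 0;
}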
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 4fada93a31..d5f33d86bc 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -62,8 +62,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat32Sub:
case kSSEFloat32Mul:
case kSSEFloat32Div:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
case kSSEFloat32Sqrt:
case kSSEFloat32Round:
case kSSEFloat32ToFloat64:
@@ -73,8 +71,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat64Mul:
case kSSEFloat64Div:
case kSSEFloat64Mod:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
case kSSEFloat64Sqrt:
case kSSEFloat64Round:
case kSSEFloat32Max:
@@ -114,10 +110,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXFloat64Sub:
case kAVXFloat64Mul:
case kAVXFloat64Div:
- case kAVXFloat64Abs:
- case kAVXFloat64Neg:
- case kAVXFloat32Abs:
- case kAVXFloat32Neg:
+ case kX64Float64Abs:
+ case kX64Float64Neg:
+ case kX64Float32Abs:
+ case kX64Float32Neg:
case kX64BitcastFI:
case kX64BitcastDL:
case kX64BitcastIF:
@@ -422,33 +418,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64LFence:
return kHasSideEffect;
- case kX64Word64AtomicAddUint8:
- case kX64Word64AtomicAddUint16:
- case kX64Word64AtomicAddUint32:
+ case kX64Word64AtomicStoreWord64:
case kX64Word64AtomicAddUint64:
- case kX64Word64AtomicSubUint8:
- case kX64Word64AtomicSubUint16:
- case kX64Word64AtomicSubUint32:
case kX64Word64AtomicSubUint64:
- case kX64Word64AtomicAndUint8:
- case kX64Word64AtomicAndUint16:
- case kX64Word64AtomicAndUint32:
case kX64Word64AtomicAndUint64:
- case kX64Word64AtomicOrUint8:
- case kX64Word64AtomicOrUint16:
- case kX64Word64AtomicOrUint32:
case kX64Word64AtomicOrUint64:
- case kX64Word64AtomicXorUint8:
- case kX64Word64AtomicXorUint16:
- case kX64Word64AtomicXorUint32:
case kX64Word64AtomicXorUint64:
- case kX64Word64AtomicExchangeUint8:
- case kX64Word64AtomicExchangeUint16:
- case kX64Word64AtomicExchangeUint32:
case kX64Word64AtomicExchangeUint64:
- case kX64Word64AtomicCompareExchangeUint8:
- case kX64Word64AtomicCompareExchangeUint16:
- case kX64Word64AtomicCompareExchangeUint32:
case kX64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
@@ -472,18 +448,18 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kX64Imul32:
case kX64ImulHigh32:
case kX64UmulHigh32:
+ case kX64Float32Abs:
+ case kX64Float32Neg:
+ case kX64Float64Abs:
+ case kX64Float64Neg:
case kSSEFloat32Cmp:
case kSSEFloat32Add:
case kSSEFloat32Sub:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
case kSSEFloat64Cmp:
case kSSEFloat64Add:
case kSSEFloat64Sub:
case kSSEFloat64Max:
case kSSEFloat64Min:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
return 3;
case kSSEFloat32Mul:
case kSSEFloat32ToFloat64:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 53ee75064b..2f44f0dee5 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -250,6 +250,7 @@ class X64OperandGenerator final : public OperandGenerator {
};
namespace {
+
ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
ArchOpcode opcode;
switch (load_rep.representation()) {
@@ -340,6 +341,30 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
UNREACHABLE();
}
+ArchOpcode GetSeqCstStoreOpcode(StoreRepresentation store_rep) {
+ switch (store_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ return kAtomicStoreWord8;
+ case MachineRepresentation::kWord16:
+ return kAtomicStoreWord16;
+ case MachineRepresentation::kWord32:
+ return kAtomicStoreWord32;
+ case MachineRepresentation::kWord64:
+ return kX64Word64AtomicStoreWord64;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ if (COMPRESS_POINTERS_BOOL) return kAtomicStoreWord32;
+ return kX64Word64AtomicStoreWord64;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ CHECK(COMPRESS_POINTERS_BOOL);
+ return kAtomicStoreWord32;
+ default:
+ UNREACHABLE();
+ }
+}
+
} // namespace
void InstructionSelector::VisitStackSlot(Node* node) {
@@ -471,9 +496,6 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kProtectedLoad) {
code |= AccessModeField::encode(kMemoryAccessProtected);
- } else if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- code |= AccessModeField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
}
@@ -484,19 +506,39 @@ void InstructionSelector::VisitLoad(Node* node) {
VisitLoad(node, node, GetLoadOpcode(load_rep));
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
-void InstructionSelector::VisitStore(Node* node) {
- X64OperandGenerator g(this);
+namespace {
+
+// Shared routine for Word32/Word64 Atomic Exchange
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ X64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(value), g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
+
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
DCHECK_NE(store_rep.representation(), MachineRepresentation::kMapWord);
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ const bool is_seqcst =
+ atomic_order && *atomic_order == AtomicMemoryOrder::kSeqCst;
if (FLAG_enable_unconditional_write_barriers &&
CanBeTaggedOrCompressedPointer(store_rep.representation())) {
@@ -513,16 +555,13 @@ void InstructionSelector::VisitStore(Node* node) {
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- InstructionCode code = kArchStoreWithWriteBarrier;
+ InstructionCode code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
+ : kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+ selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
+ arraysize(temps), temps);
} else {
- if ((ElementSizeLog2Of(store_rep.representation()) <
- kSystemPointerSizeLog2) &&
- value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
- value = value->InputAt(0);
- }
#ifdef V8_IS_TSAN
// On TSAN builds we require two scratch registers. Because of this we also
// have to modify the inputs to take into account possible aliasing and use
@@ -536,22 +575,54 @@ void InstructionSelector::VisitStore(Node* node) {
auto reg_kind = OperandGenerator::RegisterUseKind::kUseRegister;
#endif // V8_IS_TSAN
+ // Release and non-atomic stores emit MOV and sequentially consistent stores
+ // emit XCHG.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+
+ ArchOpcode opcode;
+ AddressingMode addressing_mode;
InstructionOperand inputs[4];
size_t input_count = 0;
- AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
- node, inputs, &input_count, reg_kind);
- InstructionOperand value_operand = g.CanBeImmediate(value)
- ? g.UseImmediate(value)
- : g.UseRegister(value, reg_kind);
- inputs[input_count++] = value_operand;
- ArchOpcode opcode = GetStoreOpcode(store_rep);
+
+ if (is_seqcst) {
+ // SeqCst stores emit XCHG instead of MOV, so encode the inputs as we
+ // would for XCHG. XCHG can't encode the value as an immediate and has
+ // fewer addressing modes available.
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] =
+ g.GetEffectiveIndexOperand(index, &addressing_mode);
+ opcode = GetSeqCstStoreOpcode(store_rep);
+ } else {
+ if ((ElementSizeLog2Of(store_rep.representation()) <
+ kSystemPointerSizeLog2) &&
+ value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+ value = value->InputAt(0);
+ }
+
+ addressing_mode = g.GetEffectiveAddressMemoryOperand(
+ node, inputs, &input_count, reg_kind);
+ InstructionOperand value_operand = g.CanBeImmediate(value)
+ ? g.UseImmediate(value)
+ : g.UseRegister(value, reg_kind);
+ inputs[input_count++] = value_operand;
+ opcode = GetStoreOpcode(store_rep);
+ }
+
InstructionCode code =
opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
- inputs, temp_count, temps);
+ selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
+ input_count, inputs, temp_count, temps);
}
}
+} // namespace
+
+void InstructionSelector::VisitStore(Node* node) {
+ return VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
void InstructionSelector::VisitProtectedStore(Node* node) {
X64OperandGenerator g(this);
Node* value = node->InputAt(2);
@@ -1502,8 +1573,7 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
}
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
- case IrOpcode::kProtectedLoad:
- case IrOpcode::kPoisonedLoad: {
+ case IrOpcode::kProtectedLoad: {
// The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly
// zero-extend to 64-bit on x64, so the zero-extension is a no-op.
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -1622,15 +1692,12 @@ void VisitFloatBinop(InstructionSelector* selector, Node* node,
}
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ ArchOpcode opcode) {
X64OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempDoubleRegister()};
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), g.UseUnique(input),
- arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(input));
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input),
- arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
}
}
@@ -1770,7 +1837,7 @@ void InstructionSelector::VisitFloat32Div(Node* node) {
}
void InstructionSelector::VisitFloat32Abs(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float32Abs);
}
void InstructionSelector::VisitFloat32Max(Node* node) {
@@ -1814,7 +1881,7 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
}
void InstructionSelector::VisitFloat64Abs(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float64Abs);
}
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
@@ -1822,11 +1889,11 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
void InstructionSelector::VisitFloat32Neg(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float32Neg);
}
void InstructionSelector::VisitFloat64Neg(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float64Neg);
}
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
@@ -2294,7 +2361,7 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
// Shared routine for Word32/Word64 Atomic Binops
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2305,14 +2372,15 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
InstructionOperand temps[] = {g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
// Shared routine for Word32/Word64 Atomic CmpExchg
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2324,23 +2392,8 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
-}
-
-// Shared routine for Word32/Word64 Atomic Exchange
-void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- X64OperandGenerator g(selector);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- AddressingMode addressing_mode;
- InstructionOperand inputs[] = {
- g.UseUniqueRegister(value), g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
@@ -2711,131 +2764,114 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
- load_rep.representation() == MachineRepresentation::kWord16 ||
- load_rep.representation() == MachineRepresentation::kWord32);
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ DCHECK(IsIntegral(load_rep.representation()) ||
+ IsAnyTagged(load_rep.representation()) ||
+ (COMPRESS_POINTERS_BOOL &&
+ CanBeCompressedPointer(load_rep.representation())));
+ DCHECK_NE(load_rep.representation(), MachineRepresentation::kWord64);
+ DCHECK(!load_rep.IsMapWord());
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ DCHECK(!atomic_load_params.representation().IsMapWord());
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ VisitLoad(node, node, GetLoadOpcode(atomic_load_params.representation()));
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicExchangeInt8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicExchangeInt16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicExchangeWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicExchange(this, node, opcode);
+ AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+ DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
+ DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
+ kTaggedSize == 4);
+ VisitStoreCommon(this, node, params.store_representation(), params.order());
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kX64Word64AtomicExchangeUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kX64Word64AtomicExchangeUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kX64Word64AtomicExchangeUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kX64Word64AtomicExchangeUint64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicExchange(this, node, opcode);
+ AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+ DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
+ kTaggedSize == 8);
+ VisitStoreCommon(this, node, params.store_representation(), params.order());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Uint8()) {
- opcode = kX64Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kX64Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kX64Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kX64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Uint8()) {
- opcode = kX64Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kX64Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kX64Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kX64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
@@ -2856,15 +2892,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2889,14 +2924,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kX64Word64Atomic##op##Uint8, kX64Word64Atomic##op##Uint16, \
- kX64Word64Atomic##op##Uint32, kX64Word64Atomic##op##Uint64); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kX64Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -3053,6 +3088,7 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_NARROW_SHIFT_OPCODES(V) \
V(I8x16Shl) \
+ V(I8x16ShrS) \
V(I8x16ShrU)
void InstructionSelector::VisitS128Const(Node* node) {
@@ -3182,19 +3218,19 @@ SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
#undef SIMD_SHIFT_OPCODES
-#define VISIT_SIMD_NARROW_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \
- if (g.CanBeImmediate(node->InputAt(1))) { \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)), \
- arraysize(temps), temps); \
- } else { \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseUniqueRegister(node->InputAt(0)), \
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
- } \
+#define VISIT_SIMD_NARROW_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ InstructionOperand output = \
+ IsSupported(AVX) ? g.UseRegister(node) : g.DefineSameAsFirst(node); \
+ if (g.CanBeImmediate(node->InputAt(1))) { \
+ Emit(kX64##Opcode, output, g.UseRegister(node->InputAt(0)), \
+ g.UseImmediate(node->InputAt(1))); \
+ } else { \
+ InstructionOperand temps[] = {g.TempSimd128Register()}; \
+ Emit(kX64##Opcode, output, g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
+ } \
}
SIMD_NARROW_SHIFT_OPCODES(VISIT_SIMD_NARROW_SHIFT)
#undef VISIT_SIMD_NARROW_SHIFT
@@ -3257,15 +3293,11 @@ void InstructionSelector::VisitS128AndNot(Node* node) {
}
void InstructionSelector::VisitF64x2Abs(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64F64x2Abs, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)));
+ VisitFloatUnop(this, node, node->InputAt(0), kX64F64x2Abs);
}
void InstructionSelector::VisitF64x2Neg(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64F64x2Neg, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)));
+ VisitFloatUnop(this, node, node->InputAt(0), kX64F64x2Neg);
}
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
@@ -3274,12 +3306,11 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
g.UseRegister(node->InputAt(0)));
}
-#define VISIT_SIMD_QFMOP(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
- g.UseRegister(node->InputAt(2))); \
+#define VISIT_SIMD_QFMOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64##Opcode, g.UseRegister(node), g.UseRegister(node->InputAt(0)), \
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2))); \
}
VISIT_SIMD_QFMOP(F64x2Qfma)
VISIT_SIMD_QFMOP(F64x2Qfms)
@@ -3321,7 +3352,8 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
X64OperandGenerator g(this);
- Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node),
+ Emit(kX64I32x4SConvertF32x4,
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)));
}
@@ -3333,19 +3365,6 @@ void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI8x16ShrS(Node* node) {
- X64OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- Emit(kX64I8x16ShrS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)));
- } else {
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- Emit(kX64I8x16ShrS, g.DefineSameAsFirst(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
- }
-}
-
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
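The comments added in VisitStoreCommon and the atomic-load visitors lean on the standard x86-64 lowering of C++ memory orders (see the cpp0xmappings page cited in the diff): relaxed, release and acquire accesses compile to ordinary MOVs, while a sequentially consistent store needs the full barrier implied by XCHG, which is why the selector routes it through the kAtomicStoreWord*/XCHG path. A minimal sketch of those orders at the C++ level, assuming nothing beyond <atomic>:

#include <atomic>

std::atomic<long> g_cell{0};

// Relaxed and release stores can be lowered to a plain MOV on x86-64.
void StoreRelease(long v) { g_cell.store(v, std::memory_order_release); }

// A seq_cst store needs the implicit full fence of XCHG
// (or MOV followed by MFENCE).
void StoreSeqCst(long v) { g_cell.store(v, std::memory_order_seq_cst); }

// Acquire and seq_cst loads are both plain MOVs on x86-64, so the memory
// order can be ignored when choosing the load opcode.
long LoadAcquire() { return g_cell.load(std::memory_order_acquire); }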
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index a864012a7a..1515340503 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -135,7 +135,6 @@ Reduction BranchElimination::ReduceBranch(Node* node) {
bool condition_value;
// If we know the condition we can discard the branch.
if (from_input.LookupCondition(condition, &branch, &condition_value)) {
- MarkAsSafetyCheckIfNeeded(branch, node);
for (Node* const use : node->uses()) {
switch (use->opcode()) {
case IrOpcode::kIfTrue:
@@ -215,7 +214,6 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
Node* branch;
// If we know the condition we can discard the branch.
if (conditions.LookupCondition(condition, &branch, &condition_value)) {
- MarkAsSafetyCheckIfNeeded(branch, node);
if (condition_is_true == condition_value) {
// We don't update the conditions here, because we're replacing {node}
// with the {control} node that already contains the right information.
@@ -410,21 +408,6 @@ bool BranchElimination::ControlPathConditions::BlocksAndConditionsInvariant() {
}
#endif
-void BranchElimination::MarkAsSafetyCheckIfNeeded(Node* branch, Node* node) {
- // Check if {branch} is dead because we might have a stale side-table entry.
- if (!branch->IsDead() && branch->opcode() != IrOpcode::kDead &&
- branch->opcode() != IrOpcode::kTrapIf &&
- branch->opcode() != IrOpcode::kTrapUnless) {
- IsSafetyCheck branch_safety = IsSafetyCheckOf(branch->op());
- IsSafetyCheck combined_safety =
- CombineSafetyChecks(branch_safety, IsSafetyCheckOf(node->op()));
- if (branch_safety != combined_safety) {
- NodeProperties::ChangeOp(
- branch, common()->MarkAsSafetyCheck(branch->op(), combined_safety));
- }
- }
-}
-
Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
Isolate* BranchElimination::isolate() const { return jsgraph()->isolate(); }
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index 9078c39038..93bacbff7b 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -114,7 +114,6 @@ class V8_EXPORT_PRIVATE BranchElimination final
Reduction UpdateConditions(Node* node, ControlPathConditions prev_conditions,
Node* current_condition, Node* current_branch,
bool is_true_branch, bool in_new_block);
- void MarkAsSafetyCheckIfNeeded(Node* branch, Node* node);
Node* dead() const { return dead_; }
Graph* graph() const;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 985a256c57..019f0bc954 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -141,9 +141,8 @@ class BytecodeGraphBuilder {
Node* NewIfDefault() { return NewNode(common()->IfDefault()); }
Node* NewMerge() { return NewNode(common()->Merge(1), true); }
Node* NewLoop() { return NewNode(common()->Loop(1), true); }
- Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck) {
- return NewNode(common()->Branch(hint, is_safety_check), condition);
+ Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
+ return NewNode(common()->Branch(hint), condition);
}
Node* NewSwitch(Node* condition, int control_output_count) {
return NewNode(common()->Switch(control_output_count), condition);
@@ -1053,7 +1052,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
shared_info_(shared_info),
bytecode_array_(shared_info.GetBytecodeArray()),
feedback_cell_(feedback_cell),
- feedback_vector_(feedback_cell.value().value()),
+ feedback_vector_(feedback_cell.feedback_vector().value()),
invocation_frequency_(invocation_frequency),
type_hint_lowering_(
broker, jsgraph, feedback_vector_,
@@ -3959,7 +3958,7 @@ void BytecodeGraphBuilder::BuildJump() {
}
void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
- NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(condition, BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfTrue();
@@ -3971,7 +3970,7 @@ void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
}
void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
- NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(condition, BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfFalse();
@@ -3997,8 +3996,7 @@ void BytecodeGraphBuilder::BuildJumpIfNotEqual(Node* comperand) {
}
void BytecodeGraphBuilder::BuildJumpIfFalse() {
- NewBranch(environment()->LookupAccumulator(), BranchHint::kNone,
- IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(environment()->LookupAccumulator(), BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfFalse();
@@ -4012,8 +4010,7 @@ void BytecodeGraphBuilder::BuildJumpIfFalse() {
}
void BytecodeGraphBuilder::BuildJumpIfTrue() {
- NewBranch(environment()->LookupAccumulator(), BranchHint::kNone,
- IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(environment()->LookupAccumulator(), BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfTrue();
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 5950541111..e62babccf1 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -100,6 +100,18 @@ namespace {
#define CALLEE_SAVE_FP_REGISTERS \
f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit()
+#elif V8_TARGET_ARCH_LOONG64
+// ===========================================================================
+// == loong64 ================================================================
+// ===========================================================================
+#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
+#define CALLEE_SAVE_REGISTERS \
+ s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
+ s7.bit() | s8.bit() | fp.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ f24.bit() | f25.bit() | f26.bit() | f27.bit() | f28.bit() | f29.bit() | \
+ f30.bit() | f31.bit()
+
#elif V8_TARGET_ARCH_PPC64
// ===========================================================================
// == ppc & ppc64 ============================================================
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 2cbcce236f..d27744072a 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -48,8 +48,7 @@ static_assert(
CodeAssemblerState::CodeAssemblerState(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- CodeKind kind, const char* name, PoisoningMitigationLevel poisoning_level,
- Builtin builtin)
+ CodeKind kind, const char* name, Builtin builtin)
// TODO(rmcilroy): Should we use Linkage::GetBytecodeDispatchDescriptor for
// bytecode handlers?
: CodeAssemblerState(
@@ -57,29 +56,26 @@ CodeAssemblerState::CodeAssemblerState(
Linkage::GetStubCallDescriptor(
zone, descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties),
- kind, name, poisoning_level, builtin) {}
+ kind, name, builtin) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
int parameter_count, CodeKind kind,
- const char* name,
- PoisoningMitigationLevel poisoning_level,
- Builtin builtin)
+ const char* name, Builtin builtin)
: CodeAssemblerState(
isolate, zone,
Linkage::GetJSCallDescriptor(zone, false, parameter_count,
CallDescriptor::kCanUseRoots),
- kind, name, poisoning_level, builtin) {}
+ kind, name, builtin) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor,
CodeKind kind, const char* name,
- PoisoningMitigationLevel poisoning_level,
Builtin builtin)
: raw_assembler_(new RawMachineAssembler(
isolate, zone->New<Graph>(zone), call_descriptor,
MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements(), poisoning_level)),
+ InstructionSelector::AlignmentRequirements())),
kind_(kind),
name_(name),
builtin_(builtin),
@@ -169,10 +165,6 @@ bool CodeAssembler::Word32ShiftIsSafe() const {
return raw_assembler()->machine()->Word32ShiftIsSafe();
}
-PoisoningMitigationLevel CodeAssembler::poisoning_level() const {
- return raw_assembler()->poisoning_level();
-}
-
// static
Handle<Code> CodeAssembler::GenerateCode(
CodeAssemblerState* state, const AssemblerOptions& options,
@@ -187,7 +179,7 @@ Handle<Code> CodeAssembler::GenerateCode(
code = Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), graph, state->jsgraph_,
rasm->source_positions(), state->kind_, state->name_,
- state->builtin_, rasm->poisoning_level(), options, profile_data)
+ state->builtin_, options, profile_data)
.ToHandleChecked();
state->code_generated_ = true;
@@ -565,15 +557,6 @@ TNode<RawPtrT> CodeAssembler::LoadParentFramePointer() {
return UncheckedCast<RawPtrT>(raw_assembler()->LoadParentFramePointer());
}
-TNode<Object> CodeAssembler::TaggedPoisonOnSpeculation(TNode<Object> value) {
- return UncheckedCast<Object>(
- raw_assembler()->TaggedPoisonOnSpeculation(value));
-}
-
-TNode<WordT> CodeAssembler::WordPoisonOnSpeculation(TNode<WordT> value) {
- return UncheckedCast<WordT>(raw_assembler()->WordPoisonOnSpeculation(value));
-}
-
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \
TNode<ResType> CodeAssembler::name(TNode<Arg1Type> a, TNode<Arg2Type> b) { \
return UncheckedCast<ResType>(raw_assembler()->name(a, b)); \
@@ -677,45 +660,44 @@ TNode<Int32T> CodeAssembler::TruncateFloat32ToInt32(TNode<Float32T> value) {
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
-Node* CodeAssembler::Load(MachineType type, Node* base,
- LoadSensitivity needs_poisoning) {
- return raw_assembler()->Load(type, base, needs_poisoning);
+Node* CodeAssembler::Load(MachineType type, Node* base) {
+ return raw_assembler()->Load(type, base);
}
-Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset,
- LoadSensitivity needs_poisoning) {
- return raw_assembler()->Load(type, base, offset, needs_poisoning);
+Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset) {
+ return raw_assembler()->Load(type, base, offset);
}
-TNode<Object> CodeAssembler::LoadFullTagged(Node* base,
- LoadSensitivity needs_poisoning) {
- return BitcastWordToTagged(Load<RawPtrT>(base, needs_poisoning));
+TNode<Object> CodeAssembler::LoadFullTagged(Node* base) {
+ return BitcastWordToTagged(Load<RawPtrT>(base));
}
-TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset,
- LoadSensitivity needs_poisoning) {
+TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset) {
// Please use LoadFromObject(MachineType::MapInHeader(), object,
// IntPtrConstant(-kHeapObjectTag)) instead.
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
- return BitcastWordToTagged(Load<RawPtrT>(base, offset, needs_poisoning));
+ return BitcastWordToTagged(Load<RawPtrT>(base, offset));
}
-Node* CodeAssembler::AtomicLoad(MachineType type, TNode<RawPtrT> base,
- TNode<WordT> offset) {
+Node* CodeAssembler::AtomicLoad(MachineType type, AtomicMemoryOrder order,
+ TNode<RawPtrT> base, TNode<WordT> offset) {
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
- return raw_assembler()->AtomicLoad(type, base, offset);
+ return raw_assembler()->AtomicLoad(AtomicLoadParameters(type, order), base,
+ offset);
}
template <class Type>
-TNode<Type> CodeAssembler::AtomicLoad64(TNode<RawPtrT> base,
+TNode<Type> CodeAssembler::AtomicLoad64(AtomicMemoryOrder order,
+ TNode<RawPtrT> base,
TNode<WordT> offset) {
- return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(base, offset));
+ return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(
+ AtomicLoadParameters(MachineType::Uint64(), order), base, offset));
}
template TNode<AtomicInt64> CodeAssembler::AtomicLoad64<AtomicInt64>(
- TNode<RawPtrT> base, TNode<WordT> offset);
+ AtomicMemoryOrder order, TNode<RawPtrT> base, TNode<WordT> offset);
template TNode<AtomicUint64> CodeAssembler::AtomicLoad64<AtomicUint64>(
- TNode<RawPtrT> base, TNode<WordT> offset);
+ AtomicMemoryOrder order, TNode<RawPtrT> base, TNode<WordT> offset);
Node* CodeAssembler::LoadFromObject(MachineType type, TNode<Object> object,
TNode<IntPtrT> offset) {
@@ -880,16 +862,22 @@ void CodeAssembler::StoreFullTaggedNoWriteBarrier(TNode<RawPtrT> base,
BitcastTaggedToWord(tagged_value));
}
-void CodeAssembler::AtomicStore(MachineRepresentation rep, TNode<RawPtrT> base,
+void CodeAssembler::AtomicStore(MachineRepresentation rep,
+ AtomicMemoryOrder order, TNode<RawPtrT> base,
TNode<WordT> offset, TNode<Word32T> value) {
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
- raw_assembler()->AtomicStore(rep, base, offset, value);
+ raw_assembler()->AtomicStore(
+ AtomicStoreParameters(rep, WriteBarrierKind::kNoWriteBarrier, order),
+ base, offset, value);
}
-void CodeAssembler::AtomicStore64(TNode<RawPtrT> base, TNode<WordT> offset,
- TNode<UintPtrT> value,
+void CodeAssembler::AtomicStore64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset, TNode<UintPtrT> value,
TNode<UintPtrT> value_high) {
- raw_assembler()->AtomicStore64(base, offset, value, value_high);
+ raw_assembler()->AtomicStore64(
+ AtomicStoreParameters(MachineRepresentation::kWord64,
+ WriteBarrierKind::kNoWriteBarrier, order),
+ base, offset, value, value_high);
}
#define ATOMIC_FUNCTION(name) \
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 0e6872aa66..7a22086260 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -17,6 +17,7 @@
#include "src/base/optional.h"
#include "src/base/type-traits.h"
#include "src/builtins/builtins.h"
+#include "src/codegen/atomic-memory-order.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/source-position.h"
@@ -725,47 +726,36 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<RawPtrT> LoadFramePointer();
TNode<RawPtrT> LoadParentFramePointer();
- // Poison |value| on speculative paths.
- TNode<Object> TaggedPoisonOnSpeculation(TNode<Object> value);
- TNode<WordT> WordPoisonOnSpeculation(TNode<WordT> value);
-
// Load raw memory location.
- Node* Load(MachineType type, Node* base,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ Node* Load(MachineType type, Node* base);
template <class Type>
TNode<Type> Load(MachineType type, TNode<RawPtr<Type>> base) {
DCHECK(
IsSubtype(type.representation(), MachineRepresentationOf<Type>::value));
return UncheckedCast<Type>(Load(type, static_cast<Node*>(base)));
}
- Node* Load(MachineType type, Node* base, Node* offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ Node* Load(MachineType type, Node* base, Node* offset);
template <class Type>
- TNode<Type> Load(Node* base,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return UncheckedCast<Type>(
- Load(MachineTypeOf<Type>::value, base, needs_poisoning));
+ TNode<Type> Load(Node* base) {
+ return UncheckedCast<Type>(Load(MachineTypeOf<Type>::value, base));
}
template <class Type>
- TNode<Type> Load(Node* base, TNode<WordT> offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return UncheckedCast<Type>(
- Load(MachineTypeOf<Type>::value, base, offset, needs_poisoning));
+ TNode<Type> Load(Node* base, TNode<WordT> offset) {
+ return UncheckedCast<Type>(Load(MachineTypeOf<Type>::value, base, offset));
}
template <class Type>
- TNode<Type> AtomicLoad(TNode<RawPtrT> base, TNode<WordT> offset) {
+ TNode<Type> AtomicLoad(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset) {
return UncheckedCast<Type>(
- AtomicLoad(MachineTypeOf<Type>::value, base, offset));
+ AtomicLoad(MachineTypeOf<Type>::value, order, base, offset));
}
template <class Type>
- TNode<Type> AtomicLoad64(TNode<RawPtrT> base, TNode<WordT> offset);
+ TNode<Type> AtomicLoad64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset);
// Load uncompressed tagged value from (most likely off JS heap) memory
// location.
- TNode<Object> LoadFullTagged(
- Node* base, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
- TNode<Object> LoadFullTagged(
- Node* base, TNode<IntPtrT> offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ TNode<Object> LoadFullTagged(Node* base);
+ TNode<Object> LoadFullTagged(Node* base, TNode<IntPtrT> offset);
Node* LoadFromObject(MachineType type, TNode<Object> object,
TNode<IntPtrT> offset);
@@ -822,12 +812,14 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<HeapObject> object,
int offset, Node* value);
void OptimizedStoreMap(TNode<HeapObject> object, TNode<Map>);
- void AtomicStore(MachineRepresentation rep, TNode<RawPtrT> base,
- TNode<WordT> offset, TNode<Word32T> value);
+ void AtomicStore(MachineRepresentation rep, AtomicMemoryOrder order,
+ TNode<RawPtrT> base, TNode<WordT> offset,
+ TNode<Word32T> value);
// {value_high} is used for 64-bit stores on 32-bit platforms, must be
// nullptr in other cases.
- void AtomicStore64(TNode<RawPtrT> base, TNode<WordT> offset,
- TNode<UintPtrT> value, TNode<UintPtrT> value_high);
+ void AtomicStore64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset, TNode<UintPtrT> value,
+ TNode<UintPtrT> value_high);
TNode<Word32T> AtomicAdd(MachineType type, TNode<RawPtrT> base,
TNode<UintPtrT> offset, TNode<Word32T> value);
@@ -1225,7 +1217,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... TArgs>
TNode<Object> CallJS(Callable const& callable, Node* context, Node* function,
Node* receiver, TArgs... args) {
- int argc = static_cast<int>(sizeof...(args));
+ int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
TNode<Code> target = HeapConstant(callable.code());
return CAST(CallJSStubImpl(callable.descriptor(), target, CAST(context),
@@ -1235,7 +1227,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... TArgs>
Node* ConstructJSWithTarget(Callable const& callable, Node* context,
Node* function, Node* new_target, TArgs... args) {
- int argc = static_cast<int>(sizeof...(args));
+ int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
TNode<Object> receiver = LoadRoot(RootIndex::kUndefinedValue);
TNode<Code> target = HeapConstant(callable.code());
@@ -1312,7 +1304,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void UnregisterCallGenerationCallbacks();
bool Word32ShiftIsSafe() const;
- PoisoningMitigationLevel poisoning_level() const;
bool IsJSFunctionCall() const;
@@ -1367,7 +1358,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
const CallInterfaceDescriptor& descriptor, int input_count,
Node* const* inputs);
- Node* AtomicLoad(MachineType type, TNode<RawPtrT> base, TNode<WordT> offset);
+ Node* AtomicLoad(MachineType type, AtomicMemoryOrder order,
+ TNode<RawPtrT> base, TNode<WordT> offset);
Node* UnalignedLoad(MachineType type, TNode<RawPtrT> base,
TNode<WordT> offset);
@@ -1595,13 +1587,11 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
// TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
CodeAssemblerState(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor, CodeKind kind,
- const char* name, PoisoningMitigationLevel poisoning_level,
- Builtin builtin = Builtin::kNoBuiltinId);
+ const char* name, Builtin builtin = Builtin::kNoBuiltinId);
// Create with JSCall linkage.
CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count,
CodeKind kind, const char* name,
- PoisoningMitigationLevel poisoning_level,
Builtin builtin = Builtin::kNoBuiltinId);
~CodeAssemblerState();
@@ -1628,8 +1618,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor, CodeKind kind,
- const char* name, PoisoningMitigationLevel poisoning_level,
- Builtin builtin);
+ const char* name, Builtin builtin);
void PushExceptionHandler(CodeAssemblerExceptionHandlerLabel* label);
void PopExceptionHandler();
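The code-assembler changes above drop the speculation-poisoning plumbing and thread an explicit AtomicMemoryOrder through the atomic accessors. A minimal sketch of the new call shape, assuming a CodeStubAssembler-style member context and the AtomicMemoryOrder::kSeqCst enumerator from src/codegen/atomic-memory-order.h; the load/store pair is only illustrative and is not an atomic read-modify-write:

TNode<Uint32T> LoadThenStoreSeqCst(TNode<RawPtrT> base, TNode<WordT> offset,
                                   TNode<Word32T> new_value) {
  // Callers now spell out the memory order instead of getting an implicit one.
  TNode<Uint32T> old_value =
      AtomicLoad<Uint32T>(AtomicMemoryOrder::kSeqCst, base, offset);
  AtomicStore(MachineRepresentation::kWord32, AtomicMemoryOrder::kSeqCst,
              base, offset, new_value);
  return old_value;
}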
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index b370a673b9..329ccc7e86 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -28,18 +28,6 @@ std::ostream& operator<<(std::ostream& os, BranchHint hint) {
UNREACHABLE();
}
-std::ostream& operator<<(std::ostream& os, IsSafetyCheck is_safety_check) {
- switch (is_safety_check) {
- case IsSafetyCheck::kCriticalSafetyCheck:
- return os << "CriticalSafetyCheck";
- case IsSafetyCheck::kSafetyCheck:
- return os << "SafetyCheck";
- case IsSafetyCheck::kNoSafetyCheck:
- return os << "NoSafetyCheck";
- }
- UNREACHABLE();
-}
-
std::ostream& operator<<(std::ostream& os, TrapId trap_id) {
switch (trap_id) {
#define TRAP_CASE(Name) \
@@ -59,22 +47,12 @@ TrapId TrapIdOf(const Operator* const op) {
return OpParameter<TrapId>(op);
}
-std::ostream& operator<<(std::ostream& os, BranchOperatorInfo info) {
- return os << info.hint << ", " << info.is_safety_check;
-}
-
-const BranchOperatorInfo& BranchOperatorInfoOf(const Operator* const op) {
- DCHECK_EQ(IrOpcode::kBranch, op->opcode());
- return OpParameter<BranchOperatorInfo>(op);
-}
-
BranchHint BranchHintOf(const Operator* const op) {
switch (op->opcode()) {
- case IrOpcode::kBranch:
- return BranchOperatorInfoOf(op).hint;
case IrOpcode::kIfValue:
return IfValueParametersOf(op).hint();
case IrOpcode::kIfDefault:
+ case IrOpcode::kBranch:
return OpParameter<BranchHint>(op);
default:
UNREACHABLE();
@@ -90,8 +68,7 @@ int ValueInputCountOfReturn(Operator const* const op) {
bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason() &&
- lhs.feedback() == rhs.feedback() &&
- lhs.is_safety_check() == rhs.is_safety_check();
+ lhs.feedback() == rhs.feedback();
}
bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
@@ -100,13 +77,11 @@ bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
size_t hash_value(DeoptimizeParameters p) {
FeedbackSource::Hash feebdack_hash;
- return base::hash_combine(p.kind(), p.reason(), feebdack_hash(p.feedback()),
- p.is_safety_check());
+ return base::hash_combine(p.kind(), p.reason(), feebdack_hash(p.feedback()));
}
std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
- return os << p.kind() << ", " << p.reason() << ", " << p.is_safety_check()
- << ", " << p.feedback();
+ return os << p.kind() << ", " << p.reason() << ", " << p.feedback();
}
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
@@ -117,32 +92,6 @@ DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
return OpParameter<DeoptimizeParameters>(op);
}
-IsSafetyCheck IsSafetyCheckOf(const Operator* op) {
- if (op->opcode() == IrOpcode::kBranch) {
- return BranchOperatorInfoOf(op).is_safety_check;
- }
- return DeoptimizeParametersOf(op).is_safety_check();
-}
-
-const Operator* CommonOperatorBuilder::MarkAsSafetyCheck(
- const Operator* op, IsSafetyCheck safety_check) {
- if (op->opcode() == IrOpcode::kBranch) {
- BranchOperatorInfo info = BranchOperatorInfoOf(op);
- if (info.is_safety_check == safety_check) return op;
- return Branch(info.hint, safety_check);
- }
- DeoptimizeParameters p = DeoptimizeParametersOf(op);
- if (p.is_safety_check() == safety_check) return op;
- switch (op->opcode()) {
- case IrOpcode::kDeoptimizeIf:
- return DeoptimizeIf(p.kind(), p.reason(), p.feedback(), safety_check);
- case IrOpcode::kDeoptimizeUnless:
- return DeoptimizeUnless(p.kind(), p.reason(), p.feedback(), safety_check);
- default:
- UNREACHABLE();
- }
-}
-
const Operator* CommonOperatorBuilder::DelayedStringConstant(
const StringConstantBase* str) {
return zone()->New<Operator1<const StringConstantBase*>>(
@@ -478,16 +427,10 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
#define CACHED_LOOP_EXIT_VALUE_LIST(V) V(kTagged)
-#define CACHED_BRANCH_LIST(V) \
- V(None, CriticalSafetyCheck) \
- V(True, CriticalSafetyCheck) \
- V(False, CriticalSafetyCheck) \
- V(None, SafetyCheck) \
- V(True, SafetyCheck) \
- V(False, SafetyCheck) \
- V(None, NoSafetyCheck) \
- V(True, NoSafetyCheck) \
- V(False, NoSafetyCheck)
+#define CACHED_BRANCH_LIST(V) \
+ V(None) \
+ V(True) \
+ V(False)
#define CACHED_RETURN_LIST(V) \
V(1) \
@@ -541,28 +484,22 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
V(Soft, InsufficientTypeFeedbackForGenericKeyedAccess) \
V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
-#define CACHED_DEOPTIMIZE_IF_LIST(V) \
- V(Eager, DivisionByZero, NoSafetyCheck) \
- V(Eager, DivisionByZero, SafetyCheck) \
- V(Eager, Hole, NoSafetyCheck) \
- V(Eager, Hole, SafetyCheck) \
- V(Eager, MinusZero, NoSafetyCheck) \
- V(Eager, MinusZero, SafetyCheck) \
- V(Eager, Overflow, NoSafetyCheck) \
- V(Eager, Overflow, SafetyCheck) \
- V(Eager, Smi, SafetyCheck)
-
-#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
- V(Eager, LostPrecision, NoSafetyCheck) \
- V(Eager, LostPrecision, SafetyCheck) \
- V(Eager, LostPrecisionOrNaN, NoSafetyCheck) \
- V(Eager, LostPrecisionOrNaN, SafetyCheck) \
- V(Eager, NotAHeapNumber, SafetyCheck) \
- V(Eager, NotANumberOrOddball, SafetyCheck) \
- V(Eager, NotASmi, SafetyCheck) \
- V(Eager, OutOfBounds, SafetyCheck) \
- V(Eager, WrongInstanceType, SafetyCheck) \
- V(Eager, WrongMap, SafetyCheck)
+#define CACHED_DEOPTIMIZE_IF_LIST(V) \
+ V(Eager, DivisionByZero) \
+ V(Eager, Hole) \
+ V(Eager, MinusZero) \
+ V(Eager, Overflow) \
+ V(Eager, Smi)
+
+#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
+ V(Eager, LostPrecision) \
+ V(Eager, LostPrecisionOrNaN) \
+ V(Eager, NotAHeapNumber) \
+ V(Eager, NotANumberOrOddball) \
+ V(Eager, NotASmi) \
+ V(Eager, OutOfBounds) \
+ V(Eager, WrongInstanceType) \
+ V(Eager, WrongMap)
#define CACHED_DYNAMIC_CHECK_MAPS_LIST(V) \
V(DynamicCheckMaps) \
@@ -668,18 +605,17 @@ struct CommonOperatorGlobalCache final {
CACHED_RETURN_LIST(CACHED_RETURN)
#undef CACHED_RETURN
- template <BranchHint hint, IsSafetyCheck is_safety_check>
- struct BranchOperator final : public Operator1<BranchOperatorInfo> {
+ template <BranchHint hint>
+ struct BranchOperator final : public Operator1<BranchHint> {
BranchOperator()
- : Operator1<BranchOperatorInfo>( // --
- IrOpcode::kBranch, Operator::kKontrol, // opcode
- "Branch", // name
- 1, 0, 1, 0, 0, 2, // counts
- BranchOperatorInfo{hint, is_safety_check}) {} // parameter
+ : Operator1<BranchHint>( // --
+ IrOpcode::kBranch, Operator::kKontrol, // opcode
+ "Branch", // name
+ 1, 0, 1, 0, 0, 2, // counts
+ hint) {} // parameter
};
-#define CACHED_BRANCH(Hint, IsCheck) \
- BranchOperator<BranchHint::k##Hint, IsSafetyCheck::k##IsCheck> \
- kBranch##Hint##IsCheck##Operator;
+#define CACHED_BRANCH(Hint) \
+ BranchOperator<BranchHint::k##Hint> kBranch##Hint##Operator;
CACHED_BRANCH_LIST(CACHED_BRANCH)
#undef CACHED_BRANCH
@@ -757,8 +693,7 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"Deoptimize", // name
1, 1, 1, 0, 0, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource(),
- IsSafetyCheck::kNoSafetyCheck)) {}
+ DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
#define CACHED_DEOPTIMIZE(Kind, Reason) \
DeoptimizeOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
@@ -766,8 +701,7 @@ struct CommonOperatorGlobalCache final {
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
- template <DeoptimizeKind kKind, DeoptimizeReason kReason,
- IsSafetyCheck is_safety_check>
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason>
struct DeoptimizeIfOperator final : public Operator1<DeoptimizeParameters> {
DeoptimizeIfOperator()
: Operator1<DeoptimizeParameters>( // --
@@ -775,18 +709,15 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeIf", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource(),
- is_safety_check)) {}
+ DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \
- DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason, \
- IsSafetyCheck::k##IsCheck> \
- kDeoptimizeIf##Kind##Reason##IsCheck##Operator;
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
+ DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
+ kDeoptimizeIf##Kind##Reason##Operator;
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
- template <DeoptimizeKind kKind, DeoptimizeReason kReason,
- IsSafetyCheck is_safety_check>
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason>
struct DeoptimizeUnlessOperator final
: public Operator1<DeoptimizeParameters> {
DeoptimizeUnlessOperator()
@@ -795,14 +726,12 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeUnless", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource(),
- is_safety_check)) {}
+ DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
DeoptimizeUnlessOperator<DeoptimizeKind::k##Kind, \
- DeoptimizeReason::k##Reason, \
- IsSafetyCheck::k##IsCheck> \
- kDeoptimizeUnless##Kind##Reason##IsCheck##Operator;
+ DeoptimizeReason::k##Reason> \
+ kDeoptimizeUnless##Kind##Reason##Operator;
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
@@ -815,8 +744,7 @@ struct CommonOperatorGlobalCache final {
"DynamicCheckMapsWithDeoptUnless", // name
6, 1, 1, 0, 1, 1, // counts
DeoptimizeParameters(DeoptimizeKind::kEagerWithResume, kReason,
- FeedbackSource(),
- IsSafetyCheck::kCriticalSafetyCheck)) {}
+ FeedbackSource())) {}
};
#define CACHED_DYNAMIC_CHECK_MAPS(Reason) \
DynamicMapCheckOperator<DeoptimizeReason::k##Reason> k##Reason##Operator;
@@ -985,12 +913,10 @@ const Operator* CommonOperatorBuilder::StaticAssert(const char* source) {
1, 0, source);
}
-const Operator* CommonOperatorBuilder::Branch(BranchHint hint,
- IsSafetyCheck is_safety_check) {
-#define CACHED_BRANCH(Hint, IsCheck) \
- if (hint == BranchHint::k##Hint && \
- is_safety_check == IsSafetyCheck::k##IsCheck) { \
- return &cache_.kBranch##Hint##IsCheck##Operator; \
+const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
+#define CACHED_BRANCH(Hint) \
+ if (hint == BranchHint::k##Hint) { \
+ return &cache_.kBranch##Hint##Operator; \
}
CACHED_BRANCH_LIST(CACHED_BRANCH)
#undef CACHED_BRANCH
@@ -1008,8 +934,7 @@ const Operator* CommonOperatorBuilder::Deoptimize(
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback,
- IsSafetyCheck::kNoSafetyCheck);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimize, // opcodes
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1020,17 +945,16 @@ const Operator* CommonOperatorBuilder::Deoptimize(
const Operator* CommonOperatorBuilder::DeoptimizeIf(
DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, IsSafetyCheck is_safety_check) {
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && \
- is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \
- return &cache_.kDeoptimizeIf##Kind##Reason##IsCheck##Operator; \
+ FeedbackSource const& feedback) {
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimizeIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1041,17 +965,16 @@ const Operator* CommonOperatorBuilder::DeoptimizeIf(
const Operator* CommonOperatorBuilder::DeoptimizeUnless(
DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, IsSafetyCheck is_safety_check) {
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && \
- is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \
- return &cache_.kDeoptimizeUnless##Kind##Reason##IsCheck##Operator; \
+ FeedbackSource const& feedback) {
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimizeUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1664,17 +1587,6 @@ const FrameStateInfo& FrameStateInfoOf(const Operator* op) {
return OpParameter<FrameStateInfo>(op);
}
-IsSafetyCheck CombineSafetyChecks(IsSafetyCheck a, IsSafetyCheck b) {
- if (a == IsSafetyCheck::kCriticalSafetyCheck ||
- b == IsSafetyCheck::kCriticalSafetyCheck) {
- return IsSafetyCheck::kCriticalSafetyCheck;
- }
- if (a == IsSafetyCheck::kSafetyCheck || b == IsSafetyCheck::kSafetyCheck) {
- return IsSafetyCheck::kSafetyCheck;
- }
- return IsSafetyCheck::kNoSafetyCheck;
-}
-
#undef COMMON_CACHED_OP_LIST
#undef CACHED_BRANCH_LIST
#undef CACHED_RETURN_LIST
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index fa49d3b992..f691c1fbf4 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -51,20 +51,6 @@ inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchHint);
-enum class IsSafetyCheck : uint8_t {
- kCriticalSafetyCheck,
- kSafetyCheck,
- kNoSafetyCheck
-};
-
-// Get the more critical safety check of the two arguments.
-IsSafetyCheck CombineSafetyChecks(IsSafetyCheck, IsSafetyCheck);
-
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IsSafetyCheck);
-inline size_t hash_value(IsSafetyCheck is_safety_check) {
- return static_cast<size_t>(is_safety_check);
-}
-
enum class TrapId : uint32_t {
#define DEF_ENUM(Name, ...) k##Name,
FOREACH_WASM_TRAPREASON(DEF_ENUM)
@@ -78,24 +64,6 @@ std::ostream& operator<<(std::ostream&, TrapId trap_id);
TrapId TrapIdOf(const Operator* const op);
-struct BranchOperatorInfo {
- BranchHint hint;
- IsSafetyCheck is_safety_check;
-};
-
-inline size_t hash_value(const BranchOperatorInfo& info) {
- return base::hash_combine(info.hint, info.is_safety_check);
-}
-
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchOperatorInfo);
-
-inline bool operator==(const BranchOperatorInfo& a,
- const BranchOperatorInfo& b) {
- return a.hint == b.hint && a.is_safety_check == b.is_safety_check;
-}
-
-V8_EXPORT_PRIVATE const BranchOperatorInfo& BranchOperatorInfoOf(
- const Operator* const) V8_WARN_UNUSED_RESULT;
V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const)
V8_WARN_UNUSED_RESULT;
@@ -106,23 +74,17 @@ int ValueInputCountOfReturn(Operator const* const op);
class DeoptimizeParameters final {
public:
DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback,
- IsSafetyCheck is_safety_check)
- : kind_(kind),
- reason_(reason),
- feedback_(feedback),
- is_safety_check_(is_safety_check) {}
+ FeedbackSource const& feedback)
+ : kind_(kind), reason_(reason), feedback_(feedback) {}
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
const FeedbackSource& feedback() const { return feedback_; }
- IsSafetyCheck is_safety_check() const { return is_safety_check_; }
private:
DeoptimizeKind const kind_;
DeoptimizeReason const reason_;
FeedbackSource const feedback_;
- IsSafetyCheck is_safety_check_;
};
bool operator==(DeoptimizeParameters, DeoptimizeParameters);
@@ -135,8 +97,6 @@ std::ostream& operator<<(std::ostream&, DeoptimizeParameters p);
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const)
V8_WARN_UNUSED_RESULT;
-IsSafetyCheck IsSafetyCheckOf(const Operator* op) V8_WARN_UNUSED_RESULT;
-
class SelectParameters final {
public:
explicit SelectParameters(MachineRepresentation representation,
@@ -479,8 +439,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Unreachable();
const Operator* StaticAssert(const char* source);
const Operator* End(size_t control_input_count);
- const Operator* Branch(BranchHint = BranchHint::kNone,
- IsSafetyCheck = IsSafetyCheck::kSafetyCheck);
+ const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
const Operator* IfFalse();
const Operator* IfSuccess();
@@ -492,14 +451,10 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Throw();
const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback);
- const Operator* DeoptimizeIf(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- const Operator* DeoptimizeUnless(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
+ const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback);
+ const Operator* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback);
// DynamicCheckMapsWithDeoptUnless will call the dynamic map check builtin if
// the condition is false, which may then either deoptimize or resume
// execution.
@@ -577,9 +532,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const wasm::FunctionSig* signature);
#endif // V8_ENABLE_WEBASSEMBLY
- const Operator* MarkAsSafetyCheck(const Operator* op,
- IsSafetyCheck safety_check);
-
const Operator* DelayedStringConstant(const StringConstantBase* str);
private:
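After the common-operator changes above, Branch is parameterized by BranchHint alone and DeoptimizeIf/DeoptimizeUnless no longer take an IsSafetyCheck argument. A small sketch of the slimmed-down builder API (jsgraph, condition and control are assumed to be in scope; this is not part of the patch):

// Branch carries only a hint now; BranchHintOf reads it straight back.
Node* branch = jsgraph->graph()->NewNode(
    jsgraph->common()->Branch(BranchHint::kTrue), condition, control);
DCHECK_EQ(BranchHint::kTrue, BranchHintOf(branch->op()));

// Deoptimizing operators are built from kind, reason and feedback only.
const Operator* deopt_if = jsgraph->common()->DeoptimizeIf(
    DeoptimizeKind::kEager, DeoptimizeReason::kWrongMap, FeedbackSource());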
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index dc2db32753..27720c80ed 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -5,7 +5,6 @@
#include "src/compiler/compilation-dependencies.h"
#include "src/base/optional.h"
-#include "src/compiler/compilation-dependency.h"
#include "src/execution/protectors.h"
#include "src/handles/handles-inl.h"
#include "src/objects/allocation-site-inl.h"
@@ -19,18 +18,84 @@ namespace v8 {
namespace internal {
namespace compiler {
+#define DEPENDENCY_LIST(V) \
+ V(ConsistentJSFunctionView) \
+ V(ConstantInDictionaryPrototypeChain) \
+ V(ElementsKind) \
+ V(FieldConstness) \
+ V(FieldRepresentation) \
+ V(FieldType) \
+ V(GlobalProperty) \
+ V(InitialMap) \
+ V(InitialMapInstanceSizePrediction) \
+ V(OwnConstantDataProperty) \
+ V(OwnConstantDictionaryProperty) \
+ V(OwnConstantElement) \
+ V(PretenureMode) \
+ V(Protector) \
+ V(PrototypeProperty) \
+ V(StableMap) \
+ V(Transition)
+
CompilationDependencies::CompilationDependencies(JSHeapBroker* broker,
Zone* zone)
: zone_(zone), broker_(broker), dependencies_(zone) {
broker->set_dependencies(this);
}
+namespace {
+
+enum CompilationDependencyKind {
+#define V(Name) k##Name,
+ DEPENDENCY_LIST(V)
+#undef V
+};
+
+#define V(Name) class Name##Dependency;
+DEPENDENCY_LIST(V)
+#undef V
+
+const char* CompilationDependencyKindToString(CompilationDependencyKind kind) {
+#define V(Name) #Name "Dependency",
+ static const char* const names[] = {DEPENDENCY_LIST(V)};
+#undef V
+ return names[kind];
+}
+
+} // namespace
+
+class CompilationDependency : public ZoneObject {
+ public:
+ explicit CompilationDependency(CompilationDependencyKind kind) : kind(kind) {}
+
+ virtual bool IsValid() const = 0;
+ virtual void PrepareInstall() const {}
+ virtual void Install(Handle<Code> code) const = 0;
+
+#ifdef DEBUG
+#define V(Name) \
+ bool Is##Name() const { return kind == k##Name; } \
+ V8_ALLOW_UNUSED const Name##Dependency* As##Name() const;
+ DEPENDENCY_LIST(V)
+#undef V
+#endif
+
+ const char* ToString() const {
+ return CompilationDependencyKindToString(kind);
+ }
+
+ const CompilationDependencyKind kind;
+};
+
+namespace {
+
class InitialMapDependency final : public CompilationDependency {
public:
InitialMapDependency(JSHeapBroker* broker, const JSFunctionRef& function,
const MapRef& initial_map)
- : function_(function), initial_map_(initial_map) {
- }
+ : CompilationDependency(kInitialMap),
+ function_(function),
+ initial_map_(initial_map) {}
bool IsValid() const override {
Handle<JSFunction> function = function_.object();
@@ -55,7 +120,9 @@ class PrototypePropertyDependency final : public CompilationDependency {
PrototypePropertyDependency(JSHeapBroker* broker,
const JSFunctionRef& function,
const ObjectRef& prototype)
- : function_(function), prototype_(prototype) {
+ : CompilationDependency(kPrototypeProperty),
+ function_(function),
+ prototype_(prototype) {
DCHECK(function_.has_instance_prototype(broker->dependencies()));
DCHECK(!function_.PrototypeRequiresRuntimeLookup(broker->dependencies()));
DCHECK(function_.instance_prototype(broker->dependencies())
@@ -92,7 +159,8 @@ class PrototypePropertyDependency final : public CompilationDependency {
class StableMapDependency final : public CompilationDependency {
public:
- explicit StableMapDependency(const MapRef& map) : map_(map) {}
+ explicit StableMapDependency(const MapRef& map)
+ : CompilationDependency(kStableMap), map_(map) {}
bool IsValid() const override {
// TODO(v8:11670): Consider turn this back into a CHECK inside the
@@ -117,7 +185,8 @@ class ConstantInDictionaryPrototypeChainDependency final
explicit ConstantInDictionaryPrototypeChainDependency(
const MapRef receiver_map, const NameRef property_name,
const ObjectRef constant, PropertyKind kind)
- : receiver_map_(receiver_map),
+ : CompilationDependency(kConstantInDictionaryPrototypeChain),
+ receiver_map_(receiver_map),
property_name_{property_name},
constant_{constant},
kind_{kind} {
@@ -240,7 +309,8 @@ class OwnConstantDataPropertyDependency final : public CompilationDependency {
const MapRef& map,
Representation representation,
FieldIndex index, const ObjectRef& value)
- : broker_(broker),
+ : CompilationDependency(kOwnConstantDataProperty),
+ broker_(broker),
holder_(holder),
map_(map),
representation_(representation),
@@ -294,7 +364,8 @@ class OwnConstantDictionaryPropertyDependency final
const JSObjectRef& holder,
InternalIndex index,
const ObjectRef& value)
- : broker_(broker),
+ : CompilationDependency(kOwnConstantDictionaryProperty),
+ broker_(broker),
holder_(holder),
map_(holder.map()),
index_(index),
@@ -345,7 +416,7 @@ class OwnConstantDictionaryPropertyDependency final
class ConsistentJSFunctionViewDependency final : public CompilationDependency {
public:
explicit ConsistentJSFunctionViewDependency(const JSFunctionRef& function)
- : function_(function) {}
+ : CompilationDependency(kConsistentJSFunctionView), function_(function) {}
bool IsValid() const override {
return function_.IsConsistentWithHeapState();
@@ -353,17 +424,14 @@ class ConsistentJSFunctionViewDependency final : public CompilationDependency {
void Install(Handle<Code> code) const override {}
-#ifdef DEBUG
- bool IsConsistentJSFunctionViewDependency() const override { return true; }
-#endif
-
private:
const JSFunctionRef function_;
};
class TransitionDependency final : public CompilationDependency {
public:
- explicit TransitionDependency(const MapRef& map) : map_(map) {
+ explicit TransitionDependency(const MapRef& map)
+ : CompilationDependency(kTransition), map_(map) {
DCHECK(map_.CanBeDeprecated());
}
@@ -383,7 +451,9 @@ class PretenureModeDependency final : public CompilationDependency {
public:
PretenureModeDependency(const AllocationSiteRef& site,
AllocationType allocation)
- : site_(site), allocation_(allocation) {}
+ : CompilationDependency(kPretenureMode),
+ site_(site),
+ allocation_(allocation) {}
bool IsValid() const override {
return allocation_ == site_.object()->GetAllocationType();
@@ -396,10 +466,6 @@ class PretenureModeDependency final : public CompilationDependency {
DependentCode::kAllocationSiteTenuringChangedGroup);
}
-#ifdef DEBUG
- bool IsPretenureModeDependency() const override { return true; }
-#endif
-
private:
AllocationSiteRef site_;
AllocationType allocation_;
@@ -409,7 +475,10 @@ class FieldRepresentationDependency final : public CompilationDependency {
public:
FieldRepresentationDependency(const MapRef& map, InternalIndex descriptor,
Representation representation)
- : map_(map), descriptor_(descriptor), representation_(representation) {}
+ : CompilationDependency(kFieldRepresentation),
+ map_(map),
+ descriptor_(descriptor),
+ representation_(representation) {}
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
@@ -433,12 +502,9 @@ class FieldRepresentationDependency final : public CompilationDependency {
DependentCode::kFieldRepresentationGroup);
}
-#ifdef DEBUG
- bool IsFieldRepresentationDependencyOnMap(
- Handle<Map> const& receiver_map) const override {
+ bool DependsOn(const Handle<Map>& receiver_map) const {
return map_.object().equals(receiver_map);
}
-#endif
private:
MapRef map_;
@@ -450,7 +516,10 @@ class FieldTypeDependency final : public CompilationDependency {
public:
FieldTypeDependency(const MapRef& map, InternalIndex descriptor,
const ObjectRef& type)
- : map_(map), descriptor_(descriptor), type_(type) {}
+ : CompilationDependency(kFieldType),
+ map_(map),
+ descriptor_(descriptor),
+ type_(type) {}
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
@@ -481,7 +550,9 @@ class FieldTypeDependency final : public CompilationDependency {
class FieldConstnessDependency final : public CompilationDependency {
public:
FieldConstnessDependency(const MapRef& map, InternalIndex descriptor)
- : map_(map), descriptor_(descriptor) {}
+ : CompilationDependency(kFieldConstness),
+ map_(map),
+ descriptor_(descriptor) {}
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
@@ -515,7 +586,10 @@ class GlobalPropertyDependency final : public CompilationDependency {
public:
GlobalPropertyDependency(const PropertyCellRef& cell, PropertyCellType type,
bool read_only)
- : cell_(cell), type_(type), read_only_(read_only) {
+ : CompilationDependency(kGlobalProperty),
+ cell_(cell),
+ type_(type),
+ read_only_(read_only) {
DCHECK_EQ(type_, cell_.property_details().cell_type());
DCHECK_EQ(read_only_, cell_.property_details().IsReadOnly());
}
@@ -545,7 +619,8 @@ class GlobalPropertyDependency final : public CompilationDependency {
class ProtectorDependency final : public CompilationDependency {
public:
- explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {}
+ explicit ProtectorDependency(const PropertyCellRef& cell)
+ : CompilationDependency(kProtector), cell_(cell) {}
bool IsValid() const override {
Handle<PropertyCell> cell = cell_.object();
@@ -565,7 +640,7 @@ class ProtectorDependency final : public CompilationDependency {
class ElementsKindDependency final : public CompilationDependency {
public:
ElementsKindDependency(const AllocationSiteRef& site, ElementsKind kind)
- : site_(site), kind_(kind) {
+ : CompilationDependency(kElementsKind), site_(site), kind_(kind) {
DCHECK(AllocationSite::ShouldTrack(kind_));
}
@@ -596,7 +671,10 @@ class OwnConstantElementDependency final : public CompilationDependency {
public:
OwnConstantElementDependency(const JSObjectRef& holder, uint32_t index,
const ObjectRef& element)
- : holder_(holder), index_(index), element_(element) {}
+ : CompilationDependency(kOwnConstantElement),
+ holder_(holder),
+ index_(index),
+ element_(element) {}
bool IsValid() const override {
DisallowGarbageCollection no_gc;
@@ -624,7 +702,9 @@ class InitialMapInstanceSizePredictionDependency final
public:
InitialMapInstanceSizePredictionDependency(const JSFunctionRef& function,
int instance_size)
- : function_(function), instance_size_(instance_size) {}
+ : CompilationDependency(kInitialMapInstanceSizePrediction),
+ function_(function),
+ instance_size_(instance_size) {}
bool IsValid() const override {
// The dependency is valid if the prediction is the same as the current
@@ -651,6 +731,8 @@ class InitialMapInstanceSizePredictionDependency final
int instance_size_;
};
+} // namespace
+
void CompilationDependencies::RecordDependency(
CompilationDependency const* dependency) {
if (dependency != nullptr) dependencies_.push_front(dependency);
@@ -795,9 +877,19 @@ void CompilationDependencies::DependOnOwnConstantDictionaryProperty(
broker_, holder, index, value));
}
+V8_INLINE void TraceInvalidCompilationDependency(
+ const CompilationDependency* d) {
+ DCHECK(FLAG_trace_compilation_dependencies);
+ DCHECK(!d->IsValid());
+ PrintF("Compilation aborted due to invalid dependency: %s\n", d->ToString());
+}
+
bool CompilationDependencies::Commit(Handle<Code> code) {
for (auto dep : dependencies_) {
if (!dep->IsValid()) {
+ if (FLAG_trace_compilation_dependencies) {
+ TraceInvalidCompilationDependency(dep);
+ }
dependencies_.clear();
return false;
}
@@ -812,6 +904,9 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
// can call EnsureHasInitialMap, which can invalidate a StableMapDependency
// on the prototype object's map.
if (!dep->IsValid()) {
+ if (FLAG_trace_compilation_dependencies) {
+ TraceInvalidCompilationDependency(dep);
+ }
dependencies_.clear();
return false;
}
@@ -838,8 +933,7 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
#ifdef DEBUG
for (auto dep : dependencies_) {
CHECK_IMPLIES(!dep->IsValid(),
- dep->IsPretenureModeDependency() ||
- dep->IsConsistentJSFunctionViewDependency());
+ dep->IsPretenureMode() || dep->IsConsistentJSFunctionView());
}
#endif
@@ -848,6 +942,7 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
}
namespace {
+
// This function expects to never see a JSProxy.
void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
base::Optional<JSObjectRef> last_prototype) {
@@ -862,8 +957,19 @@ void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
if (last_prototype.has_value() && proto.equals(*last_prototype)) break;
}
}
+
} // namespace
+#ifdef DEBUG
+#define V(Name) \
+ const Name##Dependency* CompilationDependency::As##Name() const { \
+ DCHECK(Is##Name()); \
+ return static_cast<const Name##Dependency*>(this); \
+ }
+DEPENDENCY_LIST(V)
+#undef V
+#endif // DEBUG
+
void CompilationDependencies::DependOnStablePrototypeChains(
ZoneVector<MapRef> const& receiver_maps, WhereToStart start,
base::Optional<JSObjectRef> last_prototype) {
@@ -944,6 +1050,17 @@ CompilationDependencies::FieldTypeDependencyOffTheRecord(
return zone_->New<FieldTypeDependency>(map, descriptor, type);
}
+#ifdef DEBUG
+// static
+bool CompilationDependencies::IsFieldRepresentationDependencyOnMap(
+ const CompilationDependency* dep, const Handle<Map>& receiver_map) {
+ return dep->IsFieldRepresentation() &&
+ dep->AsFieldRepresentation()->DependsOn(receiver_map);
+}
+#endif // DEBUG
+
+#undef DEPENDENCY_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
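With the refactor above, every concrete dependency passes its CompilationDependencyKind to the base constructor, and the DEBUG-only virtuals are replaced by Is/As helpers generated from DEPENDENCY_LIST. A hypothetical new dependency would follow the same pattern; FooBar below is made up and would only compile once a matching entry is added to DEPENDENCY_LIST:

class FooBarDependency final : public CompilationDependency {
 public:
  explicit FooBarDependency(const MapRef& map)
      : CompilationDependency(kFooBar), map_(map) {}

  bool IsValid() const override { return map_.object()->is_stable(); }
  void Install(Handle<Code> code) const override {
    // Register {code} in the appropriate DependentCode group here.
  }

 private:
  const MapRef map_;
};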
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index be507c6843..f4b49878c8 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -154,6 +154,11 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
const MapRef& map, InternalIndex descriptor,
const ObjectRef& /* Contains a FieldType underneath. */ type) const;
+#ifdef DEBUG
+ static bool IsFieldRepresentationDependencyOnMap(
+ const CompilationDependency* dep, const Handle<Map>& receiver_map);
+#endif // DEBUG
+
private:
Zone* const zone_;
JSHeapBroker* const broker_;
diff --git a/deps/v8/src/compiler/compilation-dependency.h b/deps/v8/src/compiler/compilation-dependency.h
deleted file mode 100644
index 852c7b7640..0000000000
--- a/deps/v8/src/compiler/compilation-dependency.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_COMPILATION_DEPENDENCY_H_
-#define V8_COMPILER_COMPILATION_DEPENDENCY_H_
-
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-class MaybeObjectHandle;
-
-namespace compiler {
-
-class CompilationDependency : public ZoneObject {
- public:
- virtual bool IsValid() const = 0;
- virtual void PrepareInstall() const {}
- virtual void Install(Handle<Code> code) const = 0;
-
-#ifdef DEBUG
- virtual bool IsPretenureModeDependency() const { return false; }
- virtual bool IsFieldRepresentationDependencyOnMap(
- Handle<Map> const& receiver_map) const {
- return false;
- }
- virtual bool IsConsistentJSFunctionViewDependency() const { return false; }
-#endif
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_COMPILATION_DEPENDENCY_H_
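compilation-dependency.h is deleted; its class body moved into compilation-dependencies.cc, and the DEBUG-only map query became the static CompilationDependencies::IsFieldRepresentationDependencyOnMap helper declared in compilation-dependencies.h above. A sketch of the call-site shape a former user of the removed virtual would switch to (dep and receiver_map are assumed to be in scope):

#ifdef DEBUG
  CHECK(CompilationDependencies::IsFieldRepresentationDependencyOnMap(
      dep, receiver_map));
#endif  // DEBUG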
diff --git a/deps/v8/src/compiler/decompression-optimizer.cc b/deps/v8/src/compiler/decompression-optimizer.cc
index 79e77fcee6..c0068489f7 100644
--- a/deps/v8/src/compiler/decompression-optimizer.cc
+++ b/deps/v8/src/compiler/decompression-optimizer.cc
@@ -15,8 +15,7 @@ namespace {
bool IsMachineLoad(Node* const node) {
const IrOpcode::Value opcode = node->opcode();
- return opcode == IrOpcode::kLoad || opcode == IrOpcode::kPoisonedLoad ||
- opcode == IrOpcode::kProtectedLoad ||
+ return opcode == IrOpcode::kLoad || opcode == IrOpcode::kProtectedLoad ||
opcode == IrOpcode::kUnalignedLoad ||
opcode == IrOpcode::kLoadImmutable;
}
@@ -212,10 +211,6 @@ void DecompressionOptimizer::ChangeLoad(Node* const node) {
NodeProperties::ChangeOp(node,
machine()->LoadImmutable(compressed_load_rep));
break;
- case IrOpcode::kPoisonedLoad:
- NodeProperties::ChangeOp(node,
- machine()->PoisonedLoad(compressed_load_rep));
- break;
case IrOpcode::kProtectedLoad:
NodeProperties::ChangeOp(node,
machine()->ProtectedLoad(compressed_load_rep));
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index d7a0ca62dd..83eb6c215c 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -36,7 +36,6 @@ namespace internal {
namespace compiler {
enum class MaintainSchedule { kMaintain, kDiscard };
-enum class MaskArrayIndexEnable { kDoNotMaskArrayIndex, kMaskArrayIndex };
class EffectControlLinearizer {
public:
@@ -44,13 +43,11 @@ class EffectControlLinearizer {
JSGraphAssembler* graph_assembler, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- MaskArrayIndexEnable mask_array_index,
MaintainSchedule maintain_schedule,
JSHeapBroker* broker)
: js_graph_(js_graph),
schedule_(schedule),
temp_zone_(temp_zone),
- mask_array_index_(mask_array_index),
maintain_schedule_(maintain_schedule),
source_positions_(source_positions),
node_origins_(node_origins),
@@ -80,7 +77,6 @@ class EffectControlLinearizer {
Node* LowerChangeTaggedToUint32(Node* node);
Node* LowerChangeTaggedToInt64(Node* node);
Node* LowerChangeTaggedToTaggedSigned(Node* node);
- Node* LowerPoisonIndex(Node* node);
Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
void LowerCheckMaps(Node* node, Node* frame_state);
void LowerDynamicCheckMaps(Node* node, Node* frame_state);
@@ -338,7 +334,6 @@ class EffectControlLinearizer {
JSGraph* js_graph_;
Schedule* schedule_;
Zone* temp_zone_;
- MaskArrayIndexEnable mask_array_index_;
MaintainSchedule maintain_schedule_;
RegionObservability region_observability_ = RegionObservability::kObservable;
SourcePositionTable* source_positions_;
@@ -966,9 +961,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTruncateTaggedToFloat64:
result = LowerTruncateTaggedToFloat64(node);
break;
- case IrOpcode::kPoisonIndex:
- result = LowerPoisonIndex(node);
- break;
case IrOpcode::kCheckClosure:
result = LowerCheckClosure(node, frame_state);
break;
@@ -1788,14 +1780,6 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
return done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerPoisonIndex(Node* node) {
- Node* index = node->InputAt(0);
- if (mask_array_index_ == MaskArrayIndexEnable::kMaskArrayIndex) {
- index = __ Word32PoisonOnSpeculation(index);
- }
- return index;
-}
-
Node* EffectControlLinearizer::LowerCheckClosure(Node* node,
Node* frame_state) {
Handle<FeedbackCell> feedback_cell = FeedbackCellOf(node->op());
@@ -1831,8 +1815,7 @@ void EffectControlLinearizer::MigrateInstanceOrDeopt(
__ Word32And(bitfield3,
__ Int32Constant(Map::Bits3::IsDeprecatedBit::kMask)),
__ Int32Constant(0));
- __ DeoptimizeIf(reason, feedback_source, is_not_deprecated, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
+ __ DeoptimizeIf(reason, feedback_source, is_not_deprecated, frame_state);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTryMigrateInstance;
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
@@ -1842,7 +1825,7 @@ void EffectControlLinearizer::MigrateInstanceOrDeopt(
__ Int32Constant(1), __ NoContextConstant());
Node* check = ObjectIsSmi(result);
__ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, feedback_source,
- check, frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ check, frame_state);
}
void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
@@ -1886,7 +1869,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* check = __ TaggedEqual(value_map, map);
if (i == map_count - 1) {
__ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
- frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ frame_state);
} else {
auto next_map = __ MakeLabel();
__ BranchWithCriticalSafetyCheck(check, &done, &next_map);
@@ -1908,7 +1891,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
if (i == map_count - 1) {
__ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
- frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ frame_state);
} else {
auto next_map = __ MakeLabel();
__ BranchWithCriticalSafetyCheck(check, &done, &next_map);
@@ -2528,8 +2511,8 @@ Node* EffectControlLinearizer::LowerCheckedUint32Bounds(Node* node,
Node* check = __ Uint32LessThan(index, limit);
if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
__ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
- params.check_parameters().feedback(), check, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
+ params.check_parameters().feedback(), check,
+ frame_state);
} else {
auto if_abort = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@@ -2574,8 +2557,8 @@ Node* EffectControlLinearizer::LowerCheckedUint64Bounds(Node* node,
Node* check = __ Uint64LessThan(index, limit);
if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
__ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
- params.check_parameters().feedback(), check, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
+ params.check_parameters().feedback(), check,
+ frame_state);
} else {
auto if_abort = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@@ -3696,9 +3679,14 @@ Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
}
Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
- return ChangeIntPtrToSmi(
+ Node* arguments_length = ChangeIntPtrToSmi(
__ Load(MachineType::Pointer(), __ LoadFramePointer(),
__ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
+ if (kJSArgcIncludesReceiver) {
+ arguments_length =
+ __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
+ }
+ return arguments_length;
}
Node* EffectControlLinearizer::LowerRestLength(Node* node) {
@@ -3711,6 +3699,10 @@ Node* EffectControlLinearizer::LowerRestLength(Node* node) {
Node* arguments_length = ChangeIntPtrToSmi(
__ Load(MachineType::Pointer(), frame,
__ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
+ if (kJSArgcIncludesReceiver) {
+ arguments_length =
+ __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
+ }
Node* rest_length =
__ SmiSub(arguments_length, __ SmiConstant(formal_parameter_count));
__ GotoIf(__ SmiLessThan(rest_length, __ SmiConstant(0)), &done,
@@ -4263,12 +4255,10 @@ Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
UNREACHABLE();
- return nullptr;
}
Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
UNREACHABLE();
- return nullptr;
}
#endif // V8_INTL_SUPPORT
@@ -5776,8 +5766,7 @@ Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
Node* data_ptr = BuildTypedArrayDataPointer(base, external);
// Perform the actual typed element access.
- return __ LoadElement(AccessBuilder::ForTypedArrayElement(
- array_type, true, LoadSensitivity::kCritical),
+ return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true),
data_ptr, index);
}
@@ -6796,26 +6785,13 @@ Node* EffectControlLinearizer::BuildIsClearedWeakReference(Node* maybe_object) {
#undef __
-namespace {
-
-MaskArrayIndexEnable MaskArrayForPoisonLevel(
- PoisoningMitigationLevel poison_level) {
- return (poison_level != PoisoningMitigationLevel::kDontPoison)
- ? MaskArrayIndexEnable::kMaskArrayIndex
- : MaskArrayIndexEnable::kDoNotMaskArrayIndex;
-}
-
-} // namespace
-
void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level,
JSHeapBroker* broker) {
JSGraphAssembler graph_assembler_(graph, temp_zone, base::nullopt, nullptr);
EffectControlLinearizer linearizer(graph, schedule, &graph_assembler_,
temp_zone, source_positions, node_origins,
- MaskArrayForPoisonLevel(poison_level),
MaintainSchedule::kDiscard, broker);
linearizer.Run();
}
@@ -6824,16 +6800,13 @@ void LowerToMachineSchedule(JSGraph* js_graph, Schedule* schedule,
Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level,
JSHeapBroker* broker) {
JSGraphAssembler graph_assembler(js_graph, temp_zone, base::nullopt,
schedule);
EffectControlLinearizer linearizer(js_graph, schedule, &graph_assembler,
temp_zone, source_positions, node_origins,
- MaskArrayForPoisonLevel(poison_level),
MaintainSchedule::kMaintain, broker);
- MemoryLowering memory_lowering(js_graph, temp_zone, &graph_assembler,
- poison_level);
+ MemoryLowering memory_lowering(js_graph, temp_zone, &graph_assembler);
SelectLowering select_lowering(&graph_assembler, js_graph->graph());
graph_assembler.AddInlineReducer(&memory_lowering);
graph_assembler.AddInlineReducer(&select_lowering);
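Aside on the LowerArgumentsLength/LowerRestLength hunks above: with kJSArgcIncludesReceiver set, the argument count read from the frame also counts the receiver slot, so the lowering subtracts kJSArgcReceiverSlots before exposing the Smi. A minimal standalone sketch of that arithmetic (the constants and helpers below are stand-ins for illustration, not V8 code):

    #include <algorithm>
    #include <cassert>

    // Stand-ins for V8's kJSArgcIncludesReceiver / kJSArgcReceiverSlots.
    constexpr bool kArgcIncludesReceiver = true;
    constexpr int kReceiverSlots = 1;

    // Mirrors LowerArgumentsLength: convert the raw frame argc into the
    // user-visible argument count.
    int ArgumentsLength(int argc_from_frame) {
      return kArgcIncludesReceiver ? argc_from_frame - kReceiverSlots
                                   : argc_from_frame;
    }

    // Mirrors LowerRestLength: arguments beyond the formal parameters,
    // clamped at zero.
    int RestLength(int argc_from_frame, int formal_parameter_count) {
      return std::max(0, ArgumentsLength(argc_from_frame) - formal_parameter_count);
    }

    int main() {
      // f(a, b) called as f(1, 2, 3): the frame records argc == 4 (receiver included).
      assert(ArgumentsLength(4) == 3);
      assert(RestLength(4, 2) == 1);
      assert(RestLength(4, 5) == 0);
      return 0;
    }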
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index fca4899263..97467391e2 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -26,7 +26,7 @@ class JSHeapBroker;
V8_EXPORT_PRIVATE void LinearizeEffectControl(
JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level, JSHeapBroker* broker);
+ JSHeapBroker* broker);
// Performs effect control linearization lowering in addition to machine
// lowering, producing a scheduled graph that is ready for instruction
@@ -34,7 +34,7 @@ V8_EXPORT_PRIVATE void LinearizeEffectControl(
V8_EXPORT_PRIVATE void LowerToMachineSchedule(
JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level, JSHeapBroker* broker);
+ JSHeapBroker* broker);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index bbc2049ae5..c5199f1e64 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -214,8 +214,11 @@ FrameState CreateJavaScriptBuiltinContinuationFrameState(
ContinuationFrameStateMode mode) {
// Depending on {mode}, final parameters are added by the deoptimizer
// and aren't explicitly passed in the frame state.
- DCHECK_EQ(Builtins::GetStackParameterCount(name) + 1, // add receiver
- stack_parameter_count + DeoptimizerParameterCountFor(mode));
+ DCHECK_EQ(
+ Builtins::GetStackParameterCount(name) +
+ (kJSArgcIncludesReceiver ? 0
+ : 1), // Add receiver if it is not included.
+ stack_parameter_count + DeoptimizerParameterCountFor(mode));
Node* argc = jsgraph->Constant(Builtins::GetStackParameterCount(name));
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 26ae88362d..6bfd6f8c22 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -829,46 +829,36 @@ Node* GraphAssembler::BitcastMaybeObjectToWord(Node* value) {
effect(), control()));
}
-Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) {
- return AddNode(graph()->NewNode(machine()->Word32PoisonOnSpeculation(), value,
- effect(), control()));
-}
-
Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
- return AddNode(
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeKind::kEager, reason,
- feedback, is_safety_check),
- condition, frame_state, effect(), control()));
+ Node* condition, Node* frame_state) {
+ return AddNode(graph()->NewNode(
+ common()->DeoptimizeIf(DeoptimizeKind::kEager, reason, feedback),
+ condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
- return AddNode(graph()->NewNode(
- common()->DeoptimizeIf(kind, reason, feedback, is_safety_check),
- condition, frame_state, effect(), control()));
+ Node* condition, Node* frame_state) {
+ return AddNode(
+ graph()->NewNode(common()->DeoptimizeIf(kind, reason, feedback),
+ condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeKind kind,
DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
- return AddNode(graph()->NewNode(
- common()->DeoptimizeUnless(kind, reason, feedback, is_safety_check),
- condition, frame_state, effect(), control()));
+ Node* condition, Node* frame_state) {
+ return AddNode(
+ graph()->NewNode(common()->DeoptimizeUnless(kind, reason, feedback),
+ condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
+ Node* condition, Node* frame_state) {
return DeoptimizeIfNot(DeoptimizeKind::kEager, reason, feedback, condition,
- frame_state, is_safety_check);
+ frame_state);
}
Node* GraphAssembler::DynamicCheckMapsWithDeoptUnless(Node* condition,
@@ -924,8 +914,7 @@ void GraphAssembler::BranchWithCriticalSafetyCheck(
hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
}
- BranchImpl(condition, if_true, if_false, hint,
- IsSafetyCheck::kCriticalSafetyCheck);
+ BranchImpl(condition, if_true, if_false, hint);
}
void GraphAssembler::RecordBranchInBlockUpdater(Node* branch,
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 5efe6dd9c3..c9ddd63e71 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -330,24 +330,16 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* Retain(Node* buffer);
Node* UnsafePointerAdd(Node* base, Node* external);
- Node* Word32PoisonOnSpeculation(Node* value);
-
- Node* DeoptimizeIf(
- DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition,
- Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- Node* DeoptimizeIf(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- Node* DeoptimizeIfNot(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- Node* DeoptimizeIfNot(
- DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition,
- Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
+ Node* DeoptimizeIf(DeoptimizeReason reason, FeedbackSource const& feedback,
+ Node* condition, Node* frame_state);
+ Node* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback, Node* condition,
+ Node* frame_state);
+ Node* DeoptimizeIfNot(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback, Node* condition,
+ Node* frame_state);
+ Node* DeoptimizeIfNot(DeoptimizeReason reason, FeedbackSource const& feedback,
+ Node* condition, Node* frame_state);
Node* DynamicCheckMapsWithDeoptUnless(Node* condition, Node* slot_index,
Node* map, Node* handler,
Node* feedback_vector,
@@ -557,7 +549,7 @@ class V8_EXPORT_PRIVATE GraphAssembler {
void BranchImpl(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
- BranchHint hint, IsSafetyCheck is_safety_check, Vars...);
+ BranchHint hint, Vars...);
void RecordBranchInBlockUpdater(Node* branch, Node* if_true_control,
Node* if_false_control,
BasicBlock* if_true_block,
@@ -742,8 +734,7 @@ void GraphAssembler::Branch(Node* condition,
hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
}
- BranchImpl(condition, if_true, if_false, hint, IsSafetyCheck::kNoSafetyCheck,
- vars...);
+ BranchImpl(condition, if_true, if_false, hint, vars...);
}
template <typename... Vars>
@@ -751,20 +742,17 @@ void GraphAssembler::BranchWithHint(
Node* condition, GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false, BranchHint hint,
Vars... vars) {
- BranchImpl(condition, if_true, if_false, hint, IsSafetyCheck::kNoSafetyCheck,
- vars...);
+ BranchImpl(condition, if_true, if_false, hint, vars...);
}
template <typename... Vars>
void GraphAssembler::BranchImpl(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
- BranchHint hint, IsSafetyCheck is_safety_check,
- Vars... vars) {
+ BranchHint hint, Vars... vars) {
DCHECK_NOT_NULL(control());
- Node* branch = graph()->NewNode(common()->Branch(hint, is_safety_check),
- condition, control());
+ Node* branch = graph()->NewNode(common()->Branch(hint), condition, control());
Node* if_true_control = control_ =
graph()->NewNode(common()->IfTrue(), branch);
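The graph-assembler changes above are the mechanical side of dropping IsSafetyCheck: the trailing defaulted parameter disappears from the DeoptimizeIf/DeoptimizeIfNot overloads and from BranchImpl, so call sites that relied on the default compile unchanged, while the ones that passed kCriticalSafetyCheck explicitly (see effect-control-linearizer.cc above) have to drop the argument. A small illustration of that refactoring pattern, using made-up names rather than the real API:

    #include <iostream>

    enum class SafetyCheck { kNoSafetyCheck, kCriticalSafetyCheck };

    // Before: trailing parameter with a default, like the old DeoptimizeIf.
    void DeoptimizeIfOld(bool condition,
                         SafetyCheck check = SafetyCheck::kNoSafetyCheck) {
      if (condition)
        std::cout << "deopt, safety=" << static_cast<int>(check) << "\n";
    }

    // After: the parameter is gone, like the new DeoptimizeIf.
    void DeoptimizeIfNew(bool condition) {
      if (condition) std::cout << "deopt\n";
    }

    int main() {
      DeoptimizeIfOld(true);  // relied on the default: unaffected by the removal
      DeoptimizeIfOld(true, SafetyCheck::kCriticalSafetyCheck);  // must be updated
      DeoptimizeIfNew(true);  // the updated call shape
      return 0;
    }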
diff --git a/deps/v8/src/compiler/heap-refs.cc b/deps/v8/src/compiler/heap-refs.cc
index 1688a14a04..c246430de2 100644
--- a/deps/v8/src/compiler/heap-refs.cc
+++ b/deps/v8/src/compiler/heap-refs.cc
@@ -14,7 +14,6 @@
#include "src/base/platform/platform.h"
#include "src/codegen/code-factory.h"
#include "src/compiler/compilation-dependencies.h"
-#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-heap-broker.h"
#include "src/execution/protectors-inl.h"
#include "src/objects/allocation-site-inl.h"
@@ -41,7 +40,7 @@ namespace compiler {
//
// kBackgroundSerializedHeapObject: The underlying V8 object is a HeapObject
// and the data is an instance of the corresponding (most-specific) subclass,
-// e.g. JSFunctionData, which provides serialized information about the
+// e.g. JSFunctionData, which provides serialized information about the
// object. Allows serialization from the background thread.
//
// kUnserializedHeapObject: The underlying V8 object is a HeapObject and the
@@ -257,13 +256,9 @@ bool PropertyCellData::Cache(JSHeapBroker* broker) {
}
}
- if (property_details.cell_type() == PropertyCellType::kConstant) {
- Handle<Object> value_again =
- broker->CanonicalPersistentHandle(cell->value(kAcquireLoad));
- if (*value != *value_again) {
- DCHECK(!broker->IsMainThread());
- return false;
- }
+ if (property_details.cell_type() == PropertyCellType::kInTransition) {
+ DCHECK(!broker->IsMainThread());
+ return false;
}
ObjectData* value_data = broker->TryGetOrCreateData(value);
@@ -317,17 +312,6 @@ class JSObjectData : public JSReceiverData {
return object_create_map_;
}
- ObjectData* GetOwnConstantElement(
- JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
- ObjectData* GetOwnFastDataProperty(
- JSHeapBroker* broker, Representation representation,
- FieldIndex field_index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
- ObjectData* GetOwnDictionaryProperty(JSHeapBroker* broker,
- InternalIndex dict_index,
- SerializationPolicy policy);
-
// This method is only used to assert our invariants.
bool cow_or_empty_elements_tenured() const;
@@ -349,21 +333,6 @@ class JSObjectData : public JSReceiverData {
bool serialized_object_create_map_ = false;
ObjectData* object_create_map_ = nullptr;
-
- // Elements (indexed properties) that either
- // (1) are known to exist directly on the object as non-writable and
- // non-configurable, or (2) are known not to (possibly they don't exist at
- // all). In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<uint32_t, ObjectData*>> own_constant_elements_;
- // Properties that either:
- // (1) are known to exist directly on the object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- // For simplicity, this may in theory overlap with inobject_fields_.
- // For fast mode objects, the keys of the map are the property_index() values
- // of the respective property FieldIndex'es. For slow mode objects, the keys
- // are the dictionary indicies.
- ZoneUnorderedMap<int, ObjectData*> own_properties_;
};
void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker,
@@ -390,18 +359,6 @@ void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker,
namespace {
-base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
- Handle<Object> receiver,
- uint32_t index,
- bool constant_only) {
- LookupIterator it(broker->isolate(), receiver, index, LookupIterator::OWN);
- if (it.state() == LookupIterator::DATA &&
- (!constant_only || (it.IsReadOnly() && !it.IsConfigurable()))) {
- return MakeRef(broker, it.GetDataValue());
- }
- return base::nullopt;
-}
-
base::Optional<ObjectRef> GetOwnFastDataPropertyFromHeap(
JSHeapBroker* broker, JSObjectRef holder, Representation representation,
FieldIndex field_index) {
@@ -496,70 +453,6 @@ base::Optional<ObjectRef> GetOwnDictionaryPropertyFromHeap(
} // namespace
-ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
- uint32_t index,
- SerializationPolicy policy) {
- for (auto const& p : own_constant_elements_) {
- if (p.first == index) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
- return nullptr;
- }
-
- base::Optional<ObjectRef> element =
- GetOwnElementFromHeap(broker, object(), index, true);
- ObjectData* result = element.has_value() ? element->data() : nullptr;
- own_constant_elements_.push_back({index, result});
- return result;
-}
-
-ObjectData* JSObjectData::GetOwnFastDataProperty(JSHeapBroker* broker,
- Representation representation,
- FieldIndex field_index,
- SerializationPolicy policy) {
- auto p = own_properties_.find(field_index.property_index());
- if (p != own_properties_.end()) return p->second;
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about fast property with index "
- << field_index.property_index() << " on "
- << this);
- return nullptr;
- }
-
- // This call will always succeed on the main thread.
- CHECK(broker->IsMainThread());
- JSObjectRef object_ref = MakeRef(broker, Handle<JSObject>::cast(object()));
- ObjectRef property = GetOwnFastDataPropertyFromHeap(
- broker, object_ref, representation, field_index)
- .value();
- ObjectData* result(property.data());
- own_properties_.insert(std::make_pair(field_index.property_index(), result));
- return result;
-}
-
-ObjectData* JSObjectData::GetOwnDictionaryProperty(JSHeapBroker* broker,
- InternalIndex dict_index,
- SerializationPolicy policy) {
- auto p = own_properties_.find(dict_index.as_int());
- if (p != own_properties_.end()) return p->second;
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about dictionary property with index "
- << dict_index.as_int() << " on " << this);
- return nullptr;
- }
-
- ObjectRef property = GetOwnDictionaryPropertyFromHeap(
- broker, Handle<JSObject>::cast(object()), dict_index)
- .value();
- ObjectData* result(property.data());
- own_properties_.insert(std::make_pair(dict_index.as_int(), result));
- return result;
-}
-
class JSTypedArrayData : public JSObjectData {
public:
JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
@@ -625,28 +518,6 @@ class JSBoundFunctionData : public JSObjectData {
JSBoundFunctionData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSBoundFunction> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind) {}
-
- bool Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
-
- ObjectData* bound_target_function() const {
- DCHECK(!broker()->is_concurrent_inlining());
- return bound_target_function_;
- }
- ObjectData* bound_this() const {
- DCHECK(!broker()->is_concurrent_inlining());
- return bound_this_;
- }
- ObjectData* bound_arguments() const {
- DCHECK(!broker()->is_concurrent_inlining());
- return bound_arguments_;
- }
-
- private:
- bool serialized_ = false;
-
- ObjectData* bound_target_function_ = nullptr;
- ObjectData* bound_this_ = nullptr;
- ObjectData* bound_arguments_ = nullptr;
};
class JSFunctionData : public JSObjectData {
@@ -659,10 +530,6 @@ class JSFunctionData : public JSObjectData {
bool IsConsistentWithHeapState(JSHeapBroker* broker) const;
- bool has_feedback_vector() const {
- DCHECK(serialized_);
- return has_feedback_vector_;
- }
bool has_initial_map() const {
DCHECK(serialized_);
return has_initial_map_;
@@ -680,10 +547,6 @@ class JSFunctionData : public JSObjectData {
DCHECK(serialized_);
return context_;
}
- ObjectData* native_context() const {
- DCHECK(serialized_);
- return native_context_;
- }
MapData* initial_map() const {
DCHECK(serialized_);
return initial_map_;
@@ -700,10 +563,6 @@ class JSFunctionData : public JSObjectData {
DCHECK(serialized_);
return feedback_cell_;
}
- ObjectData* feedback_vector() const {
- DCHECK(serialized_);
- return feedback_vector_;
- }
int initial_map_instance_size_with_min_slack() const {
DCHECK(serialized_);
return initial_map_instance_size_with_min_slack_;
@@ -740,19 +599,16 @@ class JSFunctionData : public JSObjectData {
using UsedFields = base::Flags<UsedField>;
UsedFields used_fields_;
- bool has_feedback_vector_ = false;
ObjectData* prototype_or_initial_map_ = nullptr;
bool has_initial_map_ = false;
bool has_instance_prototype_ = false;
bool PrototypeRequiresRuntimeLookup_ = false;
ObjectData* context_ = nullptr;
- ObjectData* native_context_ = nullptr; // Derives from context_.
MapData* initial_map_ = nullptr; // Derives from prototype_or_initial_map_.
ObjectData* instance_prototype_ =
nullptr; // Derives from prototype_or_initial_map_.
ObjectData* shared_ = nullptr;
- ObjectData* feedback_vector_ = nullptr; // Derives from feedback_cell.
ObjectData* feedback_cell_ = nullptr;
int initial_map_instance_size_with_min_slack_; // Derives from
// prototype_or_initial_map_.
@@ -809,10 +665,6 @@ class MapData : public HeapObjectData {
return is_abandoned_prototype_map_;
}
- // Extra information.
- void SerializeRootMap(JSHeapBroker* broker, NotConcurrentInliningTag tag);
- ObjectData* FindRootMap() const;
-
void SerializeConstructor(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* GetConstructor() const {
CHECK(serialized_constructor_);
@@ -840,8 +692,7 @@ class MapData : public HeapObjectData {
bool has_extra_serialized_data() const {
return serialized_constructor_ || serialized_backpointer_ ||
- serialized_prototype_ || serialized_root_map_ ||
- serialized_for_element_store_;
+ serialized_prototype_ || serialized_for_element_store_;
}
private:
@@ -881,9 +732,6 @@ class MapData : public HeapObjectData {
bool serialized_prototype_ = false;
ObjectData* prototype_ = nullptr;
- bool serialized_root_map_ = false;
- ObjectData* root_map_ = nullptr;
-
bool serialized_for_element_store_ = false;
};
@@ -938,16 +786,13 @@ void JSFunctionData::Cache(JSHeapBroker* broker) {
// guaranteed to see an initialized JSFunction object, and after
// initialization fields remain in a valid state.
- Context context = function->context(kRelaxedLoad);
- context_ = broker->GetOrCreateData(context, kAssumeMemoryFence);
- CHECK(context_->IsContext());
+ ContextRef context =
+ MakeRefAssumeMemoryFence(broker, function->context(kRelaxedLoad));
+ context_ = context.data();
- native_context_ = broker->GetOrCreateData(context.map().native_context(),
- kAssumeMemoryFence);
- CHECK(native_context_->IsNativeContext());
-
- SharedFunctionInfo shared = function->shared(kRelaxedLoad);
- shared_ = broker->GetOrCreateData(shared, kAssumeMemoryFence);
+ SharedFunctionInfoRef shared =
+ MakeRefAssumeMemoryFence(broker, function->shared(kRelaxedLoad));
+ shared_ = shared.data();
if (function->has_prototype_slot()) {
prototype_or_initial_map_ = broker->GetOrCreateData(
@@ -981,9 +826,10 @@ void JSFunctionData::Cache(JSHeapBroker* broker) {
if (has_initial_map_) {
has_instance_prototype_ = true;
- instance_prototype_ = broker->GetOrCreateData(
- Handle<Map>::cast(initial_map_->object())->prototype(),
- kAssumeMemoryFence);
+ instance_prototype_ =
+ MakeRefAssumeMemoryFence(
+ broker, Handle<Map>::cast(initial_map_->object())->prototype())
+ .data();
} else if (prototype_or_initial_map_->IsHeapObject() &&
!Handle<HeapObject>::cast(prototype_or_initial_map_->object())
->IsTheHole()) {
@@ -994,15 +840,9 @@ void JSFunctionData::Cache(JSHeapBroker* broker) {
PrototypeRequiresRuntimeLookup_ = function->PrototypeRequiresRuntimeLookup();
- FeedbackCell feedback_cell = function->raw_feedback_cell(kAcquireLoad);
- feedback_cell_ = broker->GetOrCreateData(feedback_cell, kAssumeMemoryFence);
-
- ObjectData* maybe_feedback_vector = broker->GetOrCreateData(
- feedback_cell.value(kAcquireLoad), kAssumeMemoryFence);
- if (shared.is_compiled() && maybe_feedback_vector->IsFeedbackVector()) {
- has_feedback_vector_ = true;
- feedback_vector_ = maybe_feedback_vector;
- }
+ FeedbackCellRef feedback_cell = MakeRefAssumeMemoryFence(
+ broker, function->raw_feedback_cell(kAcquireLoad));
+ feedback_cell_ = feedback_cell.data();
#ifdef DEBUG
serialized_ = true;
@@ -1016,7 +856,6 @@ bool JSFunctionData::IsConsistentWithHeapState(JSHeapBroker* broker) const {
Handle<JSFunction> f = Handle<JSFunction>::cast(object());
CHECK_EQ(*context_->object(), f->context());
- CHECK_EQ(*native_context_->object(), f->native_context());
CHECK_EQ(*shared_->object(), f->shared());
if (f->has_prototype_slot()) {
@@ -1080,22 +919,6 @@ bool JSFunctionData::IsConsistentWithHeapState(JSHeapBroker* broker) const {
return false;
}
- if (has_used_field(kHasFeedbackVector) &&
- has_feedback_vector_ != f->has_feedback_vector()) {
- TRACE_BROKER_MISSING(broker, "JSFunction::has_feedback_vector");
- return false;
- }
-
- if (has_feedback_vector_) {
- if (has_used_field(kFeedbackVector) &&
- *feedback_vector_->object() != f->feedback_vector()) {
- TRACE_BROKER_MISSING(broker, "JSFunction::feedback_vector");
- return false;
- }
- } else {
- DCHECK_NULL(feedback_vector_);
- }
-
return true;
}
@@ -1269,61 +1092,16 @@ class ScriptContextTableData : public FixedArrayData {
: FixedArrayData(broker, storage, object, kind) {}
};
-bool JSBoundFunctionData::Serialize(JSHeapBroker* broker,
- NotConcurrentInliningTag tag) {
- DCHECK(!broker->is_concurrent_inlining());
-
- if (serialized_) return true;
- if (broker->StackHasOverflowed()) return false;
-
- TraceScope tracer(broker, this, "JSBoundFunctionData::Serialize");
- Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object());
-
- // We don't immediately set {serialized_} in order to correctly handle the
- // case where a recursive call to this method reaches the stack limit.
-
- DCHECK_NULL(bound_target_function_);
- bound_target_function_ =
- broker->GetOrCreateData(function->bound_target_function());
- bool serialized_nested = true;
- if (!bound_target_function_->should_access_heap()) {
- if (bound_target_function_->IsJSBoundFunction()) {
- serialized_nested =
- bound_target_function_->AsJSBoundFunction()->Serialize(broker, tag);
- }
- }
- if (!serialized_nested) {
- // We couldn't serialize all nested bound functions due to stack
- // overflow. Give up.
- DCHECK(!serialized_);
- bound_target_function_ = nullptr; // Reset to sync with serialized_.
- return false;
- }
-
- serialized_ = true;
-
- DCHECK_NULL(bound_arguments_);
- bound_arguments_ = broker->GetOrCreateData(function->bound_arguments());
-
- DCHECK_NULL(bound_this_);
- bound_this_ = broker->GetOrCreateData(function->bound_this());
-
- return true;
-}
-
JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSObject> object, ObjectDataKind kind)
: JSReceiverData(broker, storage, object, kind),
- inobject_fields_(broker->zone()),
- own_constant_elements_(broker->zone()),
- own_properties_(broker->zone()) {}
+ inobject_fields_(broker->zone()) {}
class JSArrayData : public JSObjectData {
public:
JSArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSArray> object, ObjectDataKind kind)
- : JSObjectData(broker, storage, object, kind),
- own_elements_(broker->zone()) {}
+ : JSObjectData(broker, storage, object, kind) {}
void Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* length() const {
@@ -1331,19 +1109,9 @@ class JSArrayData : public JSObjectData {
return length_;
}
- ObjectData* GetOwnElement(
- JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
-
private:
bool serialized_ = false;
ObjectData* length_ = nullptr;
-
- // Elements (indexed properties) that either
- // (1) are known to exist directly on the object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<uint32_t, ObjectData*>> own_elements_;
};
void JSArrayData::Serialize(JSHeapBroker* broker,
@@ -1358,52 +1126,11 @@ void JSArrayData::Serialize(JSHeapBroker* broker,
length_ = broker->GetOrCreateData(jsarray->length());
}
-ObjectData* JSArrayData::GetOwnElement(JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy) {
- for (auto const& p : own_elements_) {
- if (p.first == index) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
- return nullptr;
- }
-
- base::Optional<ObjectRef> element =
- GetOwnElementFromHeap(broker, object(), index, false);
- ObjectData* result = element.has_value() ? element->data() : nullptr;
- own_elements_.push_back({index, result});
- return result;
-}
-
class JSGlobalObjectData : public JSObjectData {
public:
JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSGlobalObject> object, ObjectDataKind kind)
- : JSObjectData(broker, storage, object, kind),
- properties_(broker->zone()) {
- if (!broker->is_concurrent_inlining()) {
- is_detached_ = object->IsDetached();
- }
- }
-
- bool IsDetached() const {
- return is_detached_;
- }
-
- ObjectData* GetPropertyCell(
- JSHeapBroker* broker, ObjectData* name,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
-
- private:
- // Only valid if not concurrent inlining.
- bool is_detached_ = false;
-
- // Properties that either
- // (1) are known to exist as property cells on the global object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<ObjectData*, ObjectData*>> properties_;
+ : JSObjectData(broker, storage, object, kind) {}
};
class JSGlobalProxyData : public JSObjectData {
@@ -1413,46 +1140,6 @@ class JSGlobalProxyData : public JSObjectData {
: JSObjectData(broker, storage, object, kind) {}
};
-namespace {
-
-base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
- Handle<Name> name) {
- base::Optional<PropertyCell> maybe_cell =
- ConcurrentLookupIterator::TryGetPropertyCell(
- broker->isolate(), broker->local_isolate_or_isolate(),
- broker->target_native_context().global_object().object(), name);
- if (!maybe_cell.has_value()) return {};
- return TryMakeRef(broker, *maybe_cell);
-}
-
-} // namespace
-
-ObjectData* JSGlobalObjectData::GetPropertyCell(JSHeapBroker* broker,
- ObjectData* name,
- SerializationPolicy policy) {
- CHECK_NOT_NULL(name);
- for (auto const& p : properties_) {
- if (p.first == name) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about global property " << name);
- return nullptr;
- }
-
- ObjectData* result = nullptr;
- base::Optional<PropertyCellRef> cell =
- GetPropertyCellFromHeap(broker, Handle<Name>::cast(name->object()));
- if (cell.has_value()) {
- result = cell->data();
- if (!result->should_access_heap()) {
- result->AsPropertyCell()->Cache(broker);
- }
- }
- properties_.push_back({name, result});
- return result;
-}
-
#define DEFINE_IS(Name) \
bool ObjectData::Is##Name() const { \
if (should_access_heap()) { \
@@ -1540,19 +1227,6 @@ bool MapData::TrySerializePrototype(JSHeapBroker* broker,
return true;
}
-void MapData::SerializeRootMap(JSHeapBroker* broker,
- NotConcurrentInliningTag tag) {
- if (serialized_root_map_) return;
- serialized_root_map_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializeRootMap");
- Handle<Map> map = Handle<Map>::cast(object());
- DCHECK_NULL(root_map_);
- root_map_ = broker->GetOrCreateData(map->FindRootMap(broker->isolate()));
-}
-
-ObjectData* MapData::FindRootMap() const { return root_map_; }
-
bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
NotConcurrentInliningTag tag,
int max_depth) {
@@ -1693,8 +1367,6 @@ void JSHeapBroker::InitializeAndStartSerializing() {
SetTargetNativeContextRef(target_native_context().object());
if (!is_concurrent_inlining()) {
- target_native_context().Serialize(NotConcurrentInliningTag{this});
-
Factory* const f = isolate()->factory();
ObjectData* data;
data = GetOrCreateData(f->array_buffer_detaching_protector());
@@ -1838,6 +1510,19 @@ int ObjectRef::AsSmi() const {
INSTANCE_TYPE_CHECKERS(DEF_TESTER)
#undef DEF_TESTER
+bool MapRef::CanInlineElementAccess() const {
+ if (!IsJSObjectMap()) return false;
+ if (is_access_check_needed()) return false;
+ if (has_indexed_interceptor()) return false;
+ ElementsKind kind = elements_kind();
+ if (IsFastElementsKind(kind)) return true;
+ if (IsTypedArrayElementsKind(kind) && kind != BIGUINT64_ELEMENTS &&
+ kind != BIGINT64_ELEMENTS) {
+ return true;
+ }
+ return false;
+}
+
base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
const ElementsKind current_kind = elements_kind();
if (kind == current_kind) return *this;
@@ -1931,6 +1616,11 @@ void RecordConsistentJSFunctionViewDependencyIfNeeded(
} // namespace
+base::Optional<FeedbackVectorRef> JSFunctionRef::feedback_vector(
+ CompilationDependencies* dependencies) const {
+ return raw_feedback_cell(dependencies).feedback_vector();
+}
+
int JSFunctionRef::InitialMapInstanceSizeWithMinSlack(
CompilationDependencies* dependencies) const {
if (data_->should_access_heap()) {
@@ -2096,25 +1786,21 @@ ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
}
base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
- uint32_t index, SerializationPolicy policy) const {
- if (broker()->is_concurrent_inlining()) {
- String maybe_char;
- auto result = ConcurrentLookupIterator::TryGetOwnChar(
- &maybe_char, broker()->isolate(), broker()->local_isolate(), *object(),
- index);
-
- if (result == ConcurrentLookupIterator::kGaveUp) {
- TRACE_BROKER_MISSING(broker(), "StringRef::GetCharAsStringOrUndefined on "
- << *this << " at index " << index);
- return {};
- }
+ uint32_t index) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ String maybe_char;
+ auto result = ConcurrentLookupIterator::TryGetOwnChar(
+ &maybe_char, broker()->isolate(), broker()->local_isolate(), *object(),
+ index);
- DCHECK_EQ(result, ConcurrentLookupIterator::kPresent);
- return TryMakeRef(broker(), maybe_char);
+ if (result == ConcurrentLookupIterator::kGaveUp) {
+ TRACE_BROKER_MISSING(broker(), "StringRef::GetCharAsStringOrUndefined on "
+ << *this << " at index " << index);
+ return {};
}
- CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
- return GetOwnElementFromHeap(broker(), object(), index, true);
+ DCHECK_EQ(result, ConcurrentLookupIterator::kPresent);
+ return TryMakeRef(broker(), maybe_char);
}
bool StringRef::SupportedStringKind() const {
@@ -2165,8 +1851,6 @@ int ArrayBoilerplateDescriptionRef::constants_elements_length() const {
return object()->constant_elements().length();
}
-ObjectRef FixedArrayRef::get(int i) const { return TryGet(i).value(); }
-
base::Optional<ObjectRef> FixedArrayRef::TryGet(int i) const {
Handle<Object> value;
{
@@ -2234,26 +1918,17 @@ int BytecodeArrayRef::handler_table_size() const {
return BitField::decode(ObjectRef::data()->As##holder()->field()); \
}
-// Like IF_ACCESS_FROM_HEAP[_C] but we also allow direct heap access for
+// Like IF_ACCESS_FROM_HEAP but we also allow direct heap access for
// kBackgroundSerialized only for methods that we identified to be safe.
-#define IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name) \
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
- return MakeRef(broker(), result::cast(object()->name())); \
- }
#define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
return object()->name(); \
}
-// Like BIMODAL_ACCESSOR[_C] except that we force a direct heap access if
+// Like BIMODAL_ACCESSOR except that we force a direct heap access if
// broker()->is_concurrent_inlining() is true (even for kBackgroundSerialized).
// This is because we identified the method to be safe to use direct heap
// access, but the holder##Data class still needs to be serialized.
-#define BIMODAL_ACCESSOR_WITH_FLAG(holder, result, name) \
- result##Ref holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name); \
- return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
- }
#define BIMODAL_ACCESSOR_WITH_FLAG_C(holder, result, name) \
result holder##Ref::name() const { \
IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
@@ -2298,31 +1973,22 @@ uint64_t HeapNumberRef::value_as_bits() const {
return object()->value_as_bits(kRelaxedLoad);
}
-base::Optional<JSReceiverRef> JSBoundFunctionRef::bound_target_function()
- const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Immutable after initialization.
- return TryMakeRef(broker(), object()->bound_target_function(),
- kAssumeMemoryFence);
- }
- return TryMakeRef<JSReceiver>(
- broker(), data()->AsJSBoundFunction()->bound_target_function());
+JSReceiverRef JSBoundFunctionRef::bound_target_function() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->bound_target_function());
}
-base::Optional<ObjectRef> JSBoundFunctionRef::bound_this() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Immutable after initialization.
- return TryMakeRef(broker(), object()->bound_this(), kAssumeMemoryFence);
- }
- return TryMakeRef<Object>(broker(),
- data()->AsJSBoundFunction()->bound_this());
+
+ObjectRef JSBoundFunctionRef::bound_this() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->bound_this());
}
+
FixedArrayRef JSBoundFunctionRef::bound_arguments() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Immutable after initialization.
- return MakeRefAssumeMemoryFence(broker(), object()->bound_arguments());
- }
- return FixedArrayRef(broker(),
- data()->AsJSBoundFunction()->bound_arguments());
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->bound_arguments());
}
// Immutable after initialization.
@@ -2354,8 +2020,6 @@ BIMODAL_ACCESSOR_C(Map, int, instance_size)
BIMODAL_ACCESSOR_WITH_FLAG_C(Map, int, NextFreePropertyIndex)
BIMODAL_ACCESSOR_C(Map, int, UnusedPropertyFields)
BIMODAL_ACCESSOR_WITH_FLAG_C(Map, InstanceType, instance_type)
-BIMODAL_ACCESSOR_WITH_FLAG(Map, Object, GetConstructor)
-BIMODAL_ACCESSOR_WITH_FLAG(Map, HeapObject, GetBackPointer)
BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map)
int ObjectBoilerplateDescriptionRef::size() const { return object()->size(); }
@@ -2385,33 +2049,16 @@ bool FunctionTemplateInfoRef::is_signature_undefined() const {
return object()->signature().IsUndefined(broker()->isolate());
}
-bool FunctionTemplateInfoRef::has_call_code() const {
- HeapObject call_code = object()->call_code(kAcquireLoad);
- return !call_code.IsUndefined();
-}
-
HEAP_ACCESSOR_C(FunctionTemplateInfo, bool, accept_any_receiver)
HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
- MapRef receiver_map, SerializationPolicy policy) {
+ MapRef receiver_map) {
const HolderLookupResult not_found;
- // There are currently two ways we can see a FunctionTemplateInfo on the
- // background thread: 1.) As part of a SharedFunctionInfo and 2.) in an
- // AccessorPair. In both cases, the FTI is fully constructed on the main
- // thread before.
- // TODO(nicohartmann@, v8:7790): Once the above no longer holds, we might
- // have to use the GC predicate to check whether objects are fully
- // initialized and safe to read.
- if (!receiver_map.IsJSReceiverMap() ||
- (receiver_map.is_access_check_needed() &&
- !object()->accept_any_receiver())) {
+ if (!receiver_map.IsJSObjectMap() || (receiver_map.is_access_check_needed() &&
+ !object()->accept_any_receiver())) {
return not_found;
}
- if (!receiver_map.IsJSObjectMap()) return not_found;
-
- DCHECK(has_call_code());
-
Handle<FunctionTemplateInfo> expected_receiver_type;
{
DisallowGarbageCollection no_gc;
@@ -2424,17 +2071,11 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
if (expected_receiver_type->IsTemplateFor(*receiver_map.object())) {
return HolderLookupResult(CallOptimization::kHolderIsReceiver);
}
-
if (!receiver_map.IsJSGlobalProxyMap()) return not_found;
}
- if (policy == SerializationPolicy::kSerializeIfNeeded) {
- receiver_map.SerializePrototype(NotConcurrentInliningTag{broker()});
- }
base::Optional<HeapObjectRef> prototype = receiver_map.prototype();
- if (!prototype.has_value()) return not_found;
- if (prototype->IsNull()) return not_found;
-
+ if (!prototype.has_value() || prototype->IsNull()) return not_found;
if (!expected_receiver_type->IsTemplateFor(prototype->object()->map())) {
return not_found;
}
@@ -2457,6 +2098,7 @@ ScopeInfoRef ScopeInfoRef::OuterScopeInfo() const {
HEAP_ACCESSOR_C(SharedFunctionInfo, Builtin, builtin_id)
BytecodeArrayRef SharedFunctionInfoRef::GetBytecodeArray() const {
+ CHECK(HasBytecodeArray());
BytecodeArray bytecode_array;
if (!broker()->IsMainThread()) {
bytecode_array = object()->GetBytecodeArray(broker()->local_isolate());
@@ -2480,12 +2122,9 @@ SharedFunctionInfo::Inlineability SharedFunctionInfoRef::GetInlineability()
broker()->is_turboprop());
}
-base::Optional<FeedbackVectorRef> FeedbackCellRef::value() const {
- DisallowGarbageCollection no_gc;
+ObjectRef FeedbackCellRef::value() const {
DCHECK(data_->should_access_heap());
- Object value = object()->value(kAcquireLoad);
- if (!value.IsFeedbackVector()) return base::nullopt;
- return MakeRefAssumeMemoryFence(broker(), FeedbackVector::cast(value));
+ return MakeRefAssumeMemoryFence(broker(), object()->value(kAcquireLoad));
}
base::Optional<ObjectRef> MapRef::GetStrongValue(
@@ -2513,75 +2152,59 @@ base::Optional<HeapObjectRef> MapRef::prototype() const {
return HeapObjectRef(broker(), prototype_data);
}
-void MapRef::SerializeRootMap(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsMap()->SerializeRootMap(broker(), tag);
+MapRef MapRef::FindRootMap() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // TODO(solanes, v8:7790): Consider caching the result of the root map.
+ return MakeRefAssumeMemoryFence(broker(),
+ object()->FindRootMap(broker()->isolate()));
}
-// TODO(solanes, v8:7790): Remove base::Optional from the return type when
-// deleting serialization.
-base::Optional<MapRef> MapRef::FindRootMap() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // TODO(solanes): Change TryMakeRef to MakeRef when Map is moved to
- // kNeverSerialized.
- // TODO(solanes, v8:7790): Consider caching the result of the root map.
- return TryMakeRef(broker(), object()->FindRootMap(broker()->isolate()));
+ObjectRef MapRef::GetConstructor() const {
+ if (data()->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->GetConstructor());
}
- ObjectData* map_data = data()->AsMap()->FindRootMap();
- if (map_data != nullptr) {
- return MapRef(broker(), map_data);
+ return ObjectRef(broker(), data()->AsMap()->GetConstructor());
+}
+
+HeapObjectRef MapRef::GetBackPointer() const {
+ if (data()->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(
+ broker(), HeapObject::cast(object()->GetBackPointer()));
}
- TRACE_BROKER_MISSING(broker(), "root map for object " << *this);
- return base::nullopt;
+ return HeapObjectRef(broker(), ObjectRef::data()->AsMap()->GetBackPointer());
}
bool JSTypedArrayRef::is_on_heap() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - host object seen by serializer.
- // - underlying field written 1. during initialization or 2. with
- // release-store.
- return object()->is_on_heap(kAcquireLoad);
- }
- return data()->AsJSTypedArray()->data_ptr();
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Underlying field written 1. during initialization or 2. with release-store.
+ return object()->is_on_heap(kAcquireLoad);
}
size_t JSTypedArrayRef::length() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - immutable after initialization.
- // - host object seen by serializer.
- return object()->length();
- }
- return data()->AsJSTypedArray()->length();
+ // Immutable after initialization.
+ return object()->length();
}
HeapObjectRef JSTypedArrayRef::buffer() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - immutable after initialization.
- // - host object seen by serializer.
- return MakeRef<HeapObject>(broker(), object()->buffer());
- }
- return HeapObjectRef{broker(), data()->AsJSTypedArray()->buffer()};
+ // Immutable after initialization.
+ return MakeRef<HeapObject>(broker(), object()->buffer());
}
void* JSTypedArrayRef::data_ptr() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - host object seen by serializer.
- // - underlying field written 1. during initialization or 2. protected by
- // the is_on_heap release/acquire semantics (external_pointer store
- // happens-before base_pointer store, and this external_pointer load
- // happens-after base_pointer load).
- STATIC_ASSERT(JSTypedArray::kOffHeapDataPtrEqualsExternalPointer);
- return object()->DataPtr();
- }
- return data()->AsJSTypedArray()->data_ptr();
+ // Underlying field written 1. during initialization or 2. protected by the
+ // is_on_heap release/acquire semantics (external_pointer store happens-before
+ // base_pointer store, and this external_pointer load happens-after
+ // base_pointer load).
+ STATIC_ASSERT(JSTypedArray::kOffHeapDataPtrEqualsExternalPointer);
+ return object()->DataPtr();
}
bool MapRef::IsInobjectSlackTrackingInProgress() const {
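The JSTypedArrayRef accessors above now read the object directly on the compiler thread, relying on the ordering noted in the comments: the external pointer is written before the on-heap flag/base pointer is published, and the acquire load in is_on_heap() makes the later data_ptr() read safe. A minimal sketch of that release/acquire chain with standard atomics (the field names below are invented, not V8's layout):

    #include <atomic>
    #include <cassert>
    #include <thread>

    struct TypedArraySketch {
      int* external_pointer = nullptr;     // plain field, written before publishing
      std::atomic<bool> published{false};  // stands in for the acquire/release flag
    };

    int backing_store[4] = {1, 2, 3, 4};

    // Main thread: write the data pointer, then release-store the flag.
    void Initialize(TypedArraySketch* a) {
      a->external_pointer = backing_store;                  // 1. plain store
      a->published.store(true, std::memory_order_release);  // 2. publish
    }

    // Compiler thread: acquire-load the flag; only then touch the data pointer.
    void CompilerThread(TypedArraySketch* a) {
      if (a->published.load(std::memory_order_acquire)) {
        // The acquire load synchronizes with the release store in Initialize,
        // so the external_pointer store is visible here.
        assert(a->external_pointer[0] == 1);
      }
    }

    int main() {
      TypedArraySketch a;
      std::thread writer(Initialize, &a);
      std::thread reader(CompilerThread, &a);
      writer.join();
      reader.join();
      return 0;
    }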
@@ -2642,32 +2265,6 @@ ZoneVector<const CFunctionInfo*> FunctionTemplateInfoRef::c_signatures() const {
bool StringRef::IsSeqString() const { return object()->IsSeqString(); }
-void NativeContextRef::Serialize(NotConcurrentInliningTag tag) {
- // TODO(jgruber): Disable visitation if should_access_heap() once all
- // NativeContext element refs can be created on background threads. Until
- // then, we *must* iterate them and create refs at serialization-time (even
- // though NativeContextRef itself is never-serialized).
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
-#define SERIALIZE_MEMBER(type, name) \
- { \
- ObjectData* member_data = broker()->GetOrCreateData(object()->name()); \
- if (member_data->IsMap() && !InstanceTypeChecker::IsContext( \
- member_data->AsMap()->instance_type())) { \
- member_data->AsMap()->SerializeConstructor(broker(), tag); \
- } \
- }
- BROKER_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
-#undef SERIALIZE_MEMBER
-
- for (int i = Context::FIRST_FUNCTION_MAP_INDEX;
- i <= Context::LAST_FUNCTION_MAP_INDEX; i++) {
- MapData* member_data = broker()->GetOrCreateData(object()->get(i))->AsMap();
- if (!InstanceTypeChecker::IsContext(member_data->instance_type())) {
- member_data->SerializeConstructor(broker(), tag);
- }
- }
-}
-
ScopeInfoRef NativeContextRef::scope_info() const {
// The scope_info is immutable after initialization.
return MakeRefAssumeMemoryFence(broker(), object()->scope_info());
@@ -2777,25 +2374,18 @@ bool ObjectRef::should_access_heap() const {
base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
const FixedArrayBaseRef& elements_ref, uint32_t index,
- CompilationDependencies* dependencies, SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- base::Optional<Object> maybe_element = GetOwnConstantElementFromHeap(
- *elements_ref.object(), map().elements_kind(), index);
-
- if (!maybe_element.has_value()) return {};
+ CompilationDependencies* dependencies) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ base::Optional<Object> maybe_element = GetOwnConstantElementFromHeap(
+ *elements_ref.object(), map().elements_kind(), index);
+ if (!maybe_element.has_value()) return {};
- base::Optional<ObjectRef> result =
- TryMakeRef(broker(), maybe_element.value());
- if (policy == SerializationPolicy::kAssumeSerialized &&
- result.has_value()) {
- dependencies->DependOnOwnConstantElement(*this, index, *result);
- }
- return result;
- } else {
- ObjectData* element =
- data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy);
- return TryMakeRef<Object>(broker(), element);
+ base::Optional<ObjectRef> result =
+ TryMakeRef(broker(), maybe_element.value());
+ if (result.has_value()) {
+ dependencies->DependOnOwnConstantElement(*this, index, *result);
}
+ return result;
}
base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
@@ -2844,109 +2434,82 @@ base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
base::Optional<ObjectRef> JSObjectRef::GetOwnFastDataProperty(
Representation field_representation, FieldIndex index,
- CompilationDependencies* dependencies, SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- base::Optional<ObjectRef> result = GetOwnFastDataPropertyFromHeap(
- broker(), *this, field_representation, index);
- if (policy == SerializationPolicy::kAssumeSerialized &&
- result.has_value()) {
- dependencies->DependOnOwnConstantDataProperty(
- *this, map(), field_representation, index, *result);
- }
- return result;
+ CompilationDependencies* dependencies) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ base::Optional<ObjectRef> result = GetOwnFastDataPropertyFromHeap(
+ broker(), *this, field_representation, index);
+ if (result.has_value()) {
+ dependencies->DependOnOwnConstantDataProperty(
+ *this, map(), field_representation, index, *result);
}
- ObjectData* property = data()->AsJSObject()->GetOwnFastDataProperty(
- broker(), field_representation, index, policy);
- return TryMakeRef<Object>(broker(), property);
+ return result;
}
base::Optional<ObjectRef> JSObjectRef::GetOwnDictionaryProperty(
- InternalIndex index, CompilationDependencies* dependencies,
- SerializationPolicy policy) const {
+ InternalIndex index, CompilationDependencies* dependencies) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(index.is_found());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- base::Optional<ObjectRef> result =
- GetOwnDictionaryPropertyFromHeap(broker(), object(), index);
- if (policy == SerializationPolicy::kAssumeSerialized &&
- result.has_value()) {
- dependencies->DependOnOwnConstantDictionaryProperty(*this, index,
- *result);
- }
- return result;
+ base::Optional<ObjectRef> result =
+ GetOwnDictionaryPropertyFromHeap(broker(), object(), index);
+ if (result.has_value()) {
+ dependencies->DependOnOwnConstantDictionaryProperty(*this, index, *result);
}
- ObjectData* property =
- data()->AsJSObject()->GetOwnDictionaryProperty(broker(), index, policy);
- CHECK_NE(property, nullptr);
- return ObjectRef(broker(), property);
+ return result;
}
ObjectRef JSArrayRef::GetBoilerplateLength() const {
// Safe to read concurrently because:
// - boilerplates are immutable after initialization.
// - boilerplates are published into the feedback vector.
- return length_unsafe();
+ // These facts also mean we can expect a valid value.
+ return length_unsafe().value();
}
-ObjectRef JSArrayRef::length_unsafe() const {
+base::Optional<ObjectRef> JSArrayRef::length_unsafe() const {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return MakeRef(broker(),
- object()->length(broker()->isolate(), kRelaxedLoad));
+ return TryMakeRef(broker(),
+ object()->length(broker()->isolate(), kRelaxedLoad));
} else {
return ObjectRef{broker(), data()->AsJSArray()->length()};
}
}
base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
- FixedArrayBaseRef elements_ref, uint32_t index,
- SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Note: we'd like to check `elements_ref == elements()` here, but due to
- // concurrency this may not hold. The code below must be able to deal with
- // concurrent `elements` modifications.
-
- // Due to concurrency, the kind read here may not be consistent with
- // `elements_ref`. The caller has to guarantee consistency at runtime by
- // other means (e.g. through a runtime equality check or a compilation
- // dependency).
- ElementsKind elements_kind = map().elements_kind();
-
- // We only inspect fixed COW arrays, which may only occur for fast
- // smi/objects elements kinds.
- if (!IsSmiOrObjectElementsKind(elements_kind)) return {};
- DCHECK(IsFastElementsKind(elements_kind));
- if (!elements_ref.map().IsFixedCowArrayMap()) return {};
-
- // As the name says, the `length` read here is unsafe and may not match
- // `elements`. We rely on the invariant that any `length` change will
- // also result in an `elements` change to make this safe. The `elements`
- // consistency check in the caller thus also guards the value of `length`.
- ObjectRef length_ref = length_unsafe();
-
- // Likewise we only deal with smi lengths.
- if (!length_ref.IsSmi()) return {};
-
- base::Optional<Object> result =
- ConcurrentLookupIterator::TryGetOwnCowElement(
- broker()->isolate(), *elements_ref.AsFixedArray().object(),
- elements_kind, length_ref.AsSmi(), index);
- if (!result.has_value()) return {};
-
- return TryMakeRef(broker(), result.value());
- } else {
- DCHECK(!data_->should_access_heap());
- DCHECK(!broker()->is_concurrent_inlining());
+ FixedArrayBaseRef elements_ref, uint32_t index) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Note: we'd like to check `elements_ref == elements()` here, but due to
+ // concurrency this may not hold. The code below must be able to deal with
+ // concurrent `elements` modifications.
- // Just to clarify that `elements_ref` is not used on this path.
- // GetOwnElement accesses the serialized `elements` field on its own.
- USE(elements_ref);
+ // Due to concurrency, the kind read here may not be consistent with
+ // `elements_ref`. The caller has to guarantee consistency at runtime by
+ // other means (e.g. through a runtime equality check or a compilation
+ // dependency).
+ ElementsKind elements_kind = map().elements_kind();
- if (!elements(kRelaxedLoad).value().map().IsFixedCowArrayMap()) return {};
+ // We only inspect fixed COW arrays, which may only occur for fast
+ // smi/objects elements kinds.
+ if (!IsSmiOrObjectElementsKind(elements_kind)) return {};
+ DCHECK(IsFastElementsKind(elements_kind));
+ if (!elements_ref.map().IsFixedCowArrayMap()) return {};
- ObjectData* element =
- data()->AsJSArray()->GetOwnElement(broker(), index, policy);
- if (element == nullptr) return base::nullopt;
- return ObjectRef(broker(), element);
- }
+ // As the name says, the `length` read here is unsafe and may not match
+ // `elements`. We rely on the invariant that any `length` change will
+ // also result in an `elements` change to make this safe. The `elements`
+ // consistency check in the caller thus also guards the value of `length`.
+ base::Optional<ObjectRef> length_ref = length_unsafe();
+
+ if (!length_ref.has_value()) return {};
+
+ // Likewise we only deal with smi lengths.
+ if (!length_ref->IsSmi()) return {};
+
+ base::Optional<Object> result = ConcurrentLookupIterator::TryGetOwnCowElement(
+ broker()->isolate(), *elements_ref.AsFixedArray().object(), elements_kind,
+ length_ref->AsSmi(), index);
+ if (!result.has_value()) return {};
+
+ return TryMakeRef(broker(), result.value());
}
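The rewritten GetOwnCowElement above keeps a single concurrent path: each intermediate read (the elements kind, the COW map check, the unsafe length) can race with the mutator, so any failure simply returns an empty base::Optional and the caller falls back to generic code. A minimal standalone sketch of that early-bail pattern, using plain C++17 std::optional and a made-up FakeStore type rather than any V8 API:

#include <cstdint>
#include <optional>
#include <vector>

// Hypothetical stand-in for a copy-on-write backing store whose fields are
// read while another thread may mutate the object; only the control-flow
// shape mirrors the hunk above, none of this is V8 API.
struct FakeStore {
  bool is_cow = false;
  std::optional<uint32_t> length;  // the "unsafe" length read may fail
  std::vector<int> elements;
};

std::optional<int> GetOwnCowElementSketch(const FakeStore& s, uint32_t index) {
  if (!s.is_cow) return std::nullopt;                   // only COW stores qualify
  if (!s.length.has_value()) return std::nullopt;       // length read failed: bail
  if (index >= *s.length) return std::nullopt;          // outside the observed bounds
  if (index >= s.elements.size()) return std::nullopt;  // guard the actual storage
  return s.elements[index];                             // success: hand back a value
}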
base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const {
@@ -3062,15 +2625,22 @@ base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
return TryMakeRef(broker(), heap_object);
}
+base::Optional<FeedbackVectorRef> FeedbackCellRef::feedback_vector() const {
+ ObjectRef contents = value();
+ if (!contents.IsFeedbackVector()) return {};
+ return contents.AsFeedbackVector();
+}
+
base::Optional<SharedFunctionInfoRef> FeedbackCellRef::shared_function_info()
const {
- base::Optional<FeedbackVectorRef> feedback_vector = value();
- if (!feedback_vector.has_value()) return {};
- return feedback_vector->shared_function_info();
+ base::Optional<FeedbackVectorRef> vector = feedback_vector();
+ if (!vector.has_value()) return {};
+ return vector->shared_function_info();
}
SharedFunctionInfoRef FeedbackVectorRef::shared_function_info() const {
- return MakeRef(broker(), object()->shared_function_info());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->shared_function_info());
}
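FeedbackCellRef::value() now returns a plain ObjectRef, and feedback_vector() above is a checked downcast that only yields a value when the cell currently holds a vector. The same shape, sketched with std::optional and hypothetical stand-in types rather than the real broker classes:

#include <optional>

// Illustrative stand-ins only: a cell's contents may or may not be a vector
// (e.g. the function has not been compiled yet), so the wrapper returns an
// optional instead of asserting.
struct FakeVector { int slot_count = 0; };
struct FakeCellContents {
  bool is_vector = false;
  FakeVector vector;
};

std::optional<FakeVector> FeedbackVectorOf(const FakeCellContents& contents) {
  if (!contents.is_vector) return std::nullopt;  // cell holds something else
  return contents.vector;                        // checked "AsFeedbackVector"
}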
bool NameRef::IsUniqueName() const {
@@ -3143,20 +2713,6 @@ Handle<T> TinyRef<T>::object() const {
HEAP_BROKER_OBJECT_LIST(V)
#undef V
-Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
- const char* function, int line) {
- TRACE_MISSING(broker, "data in function " << function << " at line " << line);
- return AdvancedReducer::NoChange();
-}
-
-bool JSBoundFunctionRef::Serialize(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap()) {
- return true;
- }
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- return data()->AsJSBoundFunction()->Serialize(broker(), tag);
-}
-
#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Result, Name, UsedField) \
Result##Ref JSFunctionRef::Name(CompilationDependencies* dependencies) \
const { \
@@ -3174,26 +2730,40 @@ bool JSBoundFunctionRef::Serialize(NotConcurrentInliningTag tag) {
return data()->AsJSFunction()->Name(); \
}
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_feedback_vector,
- JSFunctionData::kHasFeedbackVector)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_initial_map,
- JSFunctionData::kHasInitialMap)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_instance_prototype,
- JSFunctionData::kHasInstancePrototype)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(
+// Like JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C but only depend on the
+// field in question if its recorded value is "relevant". This is in order to
+// tolerate certain state changes during compilation, e.g. from "has no feedback
+// vector" (in which case we would simply do less optimization) to "has feedback
+// vector".
+#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C( \
+ Result, Name, UsedField, RelevantValue) \
+ Result JSFunctionRef::Name(CompilationDependencies* dependencies) const { \
+ IF_ACCESS_FROM_HEAP_C(Name); \
+ Result const result = data()->AsJSFunction()->Name(); \
+ if (result == RelevantValue) { \
+ RecordConsistentJSFunctionViewDependencyIfNeeded( \
+ broker(), *this, data()->AsJSFunction(), UsedField); \
+ } \
+ return result; \
+ }
+
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C(bool, has_initial_map,
+ JSFunctionData::kHasInitialMap,
+ true)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C(
+ bool, has_instance_prototype, JSFunctionData::kHasInstancePrototype, true)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C(
bool, PrototypeRequiresRuntimeLookup,
- JSFunctionData::kPrototypeRequiresRuntimeLookup)
+ JSFunctionData::kPrototypeRequiresRuntimeLookup, false)
+
JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Map, initial_map,
JSFunctionData::kInitialMap)
JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Object, instance_prototype,
JSFunctionData::kInstancePrototype)
JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(FeedbackCell, raw_feedback_cell,
JSFunctionData::kFeedbackCell)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(FeedbackVector, feedback_vector,
- JSFunctionData::kFeedbackVector)
BIMODAL_ACCESSOR(JSFunction, Context, context)
-BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP
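The new JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C macro above only records the consistency dependency when the observed field value is the "relevant" one, so benign transitions (such as gaining a feedback vector mid-compile) do not invalidate the compilation. A hypothetical, non-V8 sketch of that conditional-dependency rule:

#include <functional>
#include <iostream>

// Sketch of the "relevant value" rule encoded by the macro above: record a
// compilation dependency on a JSFunction field only when the observed value
// is the one the optimized code will rely on. Names and types here are
// illustrative, not the broker's real interfaces.
template <typename T>
T ReadWithConditionalDependency(T observed, T relevant_value,
                                const std::function<void()>& record_dependency) {
  if (observed == relevant_value) {
    record_dependency();  // the compile becomes invalid if this field changes
  }
  // Otherwise (e.g. "has no initial map") we merely optimize less, and a later
  // transition to the relevant value must not invalidate the compilation.
  return observed;
}

int main() {
  bool has_initial_map = true;
  ReadWithConditionalDependency(has_initial_map, /*relevant_value=*/true,
                                [] { std::cout << "dependency recorded\n"; });
}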
@@ -3203,6 +2773,11 @@ CodeRef JSFunctionRef::code() const {
return MakeRefAssumeMemoryFence(broker(), object()->code(kAcquireLoad));
}
+NativeContextRef JSFunctionRef::native_context() const {
+ return MakeRefAssumeMemoryFence(broker(),
+ context().object()->native_context());
+}
+
base::Optional<FunctionTemplateInfoRef>
SharedFunctionInfoRef::function_template_info() const {
if (!object()->IsApiFunction()) return {};
@@ -3269,23 +2844,6 @@ void MapRef::SerializePrototype(NotConcurrentInliningTag tag) {
CHECK(TrySerializePrototype(tag));
}
-void JSTypedArrayRef::Serialize(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Nothing to do.
- } else {
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSTypedArray()->Serialize(broker(), tag);
- }
-}
-
-bool JSTypedArrayRef::serialized() const {
- if (data_->should_access_heap()) return true;
- if (broker()->is_concurrent_inlining()) return true;
- if (data_->AsJSTypedArray()->serialized()) return true;
- TRACE_BROKER_MISSING(broker(), "data for JSTypedArray " << this);
- return false;
-}
-
bool PropertyCellRef::Cache() const {
if (data_->should_access_heap()) return true;
CHECK(broker()->mode() == JSHeapBroker::kSerializing ||
@@ -3293,18 +2851,6 @@ bool PropertyCellRef::Cache() const {
return data()->AsPropertyCell()->Cache(broker());
}
-void FunctionTemplateInfoRef::SerializeCallCode(NotConcurrentInliningTag tag) {
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- // CallHandlerInfo::data may still hold a serialized heap object, so we
- // have to make the broker aware of it.
- // TODO(v8:7790): Remove this case once ObjectRef is never serialized.
- Handle<HeapObject> call_code(object()->call_code(kAcquireLoad),
- broker()->isolate());
- if (call_code->IsCallHandlerInfo()) {
- broker()->GetOrCreateData(Handle<CallHandlerInfo>::cast(call_code)->data());
- }
-}
-
bool NativeContextRef::GlobalIsDetached() const {
base::Optional<ObjectRef> proxy_proto =
global_proxy_object().map().prototype();
@@ -3312,14 +2858,15 @@ bool NativeContextRef::GlobalIsDetached() const {
}
base::Optional<PropertyCellRef> JSGlobalObjectRef::GetPropertyCell(
- NameRef const& name, SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return GetPropertyCellFromHeap(broker(), name.object());
- }
-
- ObjectData* property_cell_data = data()->AsJSGlobalObject()->GetPropertyCell(
- broker(), name.data(), policy);
- return TryMakeRef<PropertyCell>(broker(), property_cell_data);
+ NameRef const& name) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ base::Optional<PropertyCell> maybe_cell =
+ ConcurrentLookupIterator::TryGetPropertyCell(
+ broker()->isolate(), broker()->local_isolate_or_isolate(),
+ broker()->target_native_context().global_object().object(),
+ name.object());
+ if (!maybe_cell.has_value()) return {};
+ return TryMakeRef(broker(), *maybe_cell);
}
std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
@@ -3347,13 +2894,11 @@ unsigned CodeRef::GetInlinedBytecodeSize() const {
#undef BIMODAL_ACCESSOR
#undef BIMODAL_ACCESSOR_B
#undef BIMODAL_ACCESSOR_C
-#undef BIMODAL_ACCESSOR_WITH_FLAG
#undef BIMODAL_ACCESSOR_WITH_FLAG_B
#undef BIMODAL_ACCESSOR_WITH_FLAG_C
#undef HEAP_ACCESSOR_C
#undef IF_ACCESS_FROM_HEAP
#undef IF_ACCESS_FROM_HEAP_C
-#undef IF_ACCESS_FROM_HEAP_WITH_FLAG
#undef IF_ACCESS_FROM_HEAP_WITH_FLAG_C
#undef TRACE
#undef TRACE_MISSING
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index d580671f6d..4644071ea5 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -55,8 +55,6 @@ inline bool IsAnyStore(AccessMode mode) {
return mode == AccessMode::kStore || mode == AccessMode::kStoreInLiteral;
}
-enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded };
-
// Clarifies in function signatures that a method may only be called when
// concurrent inlining is disabled.
class NotConcurrentInliningTag final {
@@ -272,6 +270,7 @@ class V8_EXPORT_PRIVATE ObjectRef {
private:
friend class FunctionTemplateInfoRef;
friend class JSArrayData;
+ friend class JSFunctionData;
friend class JSGlobalObjectData;
friend class JSGlobalObjectRef;
friend class JSHeapBroker;
@@ -395,9 +394,7 @@ class JSObjectRef : public JSReceiverRef {
// against inconsistency due to weak memory concurrency.
base::Optional<ObjectRef> GetOwnConstantElement(
const FixedArrayBaseRef& elements_ref, uint32_t index,
- CompilationDependencies* dependencies,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ CompilationDependencies* dependencies) const;
// The direct-read implementation of the above, extracted into a helper since
// it's also called from compilation-dependency validation. This helper is
// guaranteed to not create new Ref instances.
@@ -412,16 +409,12 @@ class JSObjectRef : public JSReceiverRef {
// property at code finalization time.
base::Optional<ObjectRef> GetOwnFastDataProperty(
Representation field_representation, FieldIndex index,
- CompilationDependencies* dependencies,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ CompilationDependencies* dependencies) const;
// Return the value of the dictionary property at {index} in the dictionary
// if {index} is known to be an own data property of the object.
base::Optional<ObjectRef> GetOwnDictionaryProperty(
- InternalIndex index, CompilationDependencies* dependencies,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ InternalIndex index, CompilationDependencies* dependencies) const;
// When concurrent inlining is enabled, reads the elements through a direct
// relaxed read. This is to ease the transition to unserialized (or
@@ -451,12 +444,8 @@ class JSBoundFunctionRef : public JSObjectRef {
Handle<JSBoundFunction> object() const;
- bool Serialize(NotConcurrentInliningTag tag);
-
- // TODO(neis): Make return types non-optional once JSFunction is no longer
- // fg-serialized.
- base::Optional<JSReceiverRef> bound_target_function() const;
- base::Optional<ObjectRef> bound_this() const;
+ JSReceiverRef bound_target_function() const;
+ ObjectRef bound_this() const;
FixedArrayRef bound_arguments() const;
};
@@ -474,8 +463,8 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
ContextRef context() const;
NativeContextRef native_context() const;
SharedFunctionInfoRef shared() const;
+ CodeRef code() const;
- bool has_feedback_vector(CompilationDependencies* dependencies) const;
bool has_initial_map(CompilationDependencies* dependencies) const;
bool PrototypeRequiresRuntimeLookup(
CompilationDependencies* dependencies) const;
@@ -484,12 +473,10 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
MapRef initial_map(CompilationDependencies* dependencies) const;
int InitialMapInstanceSizeWithMinSlack(
CompilationDependencies* dependencies) const;
- FeedbackVectorRef feedback_vector(
- CompilationDependencies* dependencies) const;
FeedbackCellRef raw_feedback_cell(
CompilationDependencies* dependencies) const;
-
- CodeRef code() const;
+ base::Optional<FeedbackVectorRef> feedback_vector(
+ CompilationDependencies* dependencies) const;
};
class RegExpBoilerplateDescriptionRef : public HeapObjectRef {
@@ -535,9 +522,6 @@ class ContextRef : public HeapObjectRef {
base::Optional<ObjectRef> get(int index) const;
};
-// TODO(jgruber): Don't serialize NativeContext fields once all refs can be
-// created concurrently.
-
#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
V(JSFunction, array_function) \
V(JSFunction, bigint_function) \
@@ -629,13 +613,12 @@ class FeedbackCellRef : public HeapObjectRef {
DEFINE_REF_CONSTRUCTOR(FeedbackCell, HeapObjectRef)
Handle<FeedbackCell> object() const;
- base::Optional<SharedFunctionInfoRef> shared_function_info() const;
- // TODO(mvstanton): Once we allow inlining of functions we didn't see
- // during serialization, we do need to ensure that any feedback vector
- // we read here has been fully initialized (ie, store-ordered into the
- // cell).
- base::Optional<FeedbackVectorRef> value() const;
+ ObjectRef value() const;
+
+ // Convenience wrappers around {value()}:
+ base::Optional<FeedbackVectorRef> feedback_vector() const;
+ base::Optional<SharedFunctionInfoRef> shared_function_info() const;
};
class FeedbackVectorRef : public HeapObjectRef {
@@ -729,6 +712,8 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
OddballType oddball_type() const;
+ bool CanInlineElementAccess() const;
+
// Note: Only returns a value if the requested elements kind matches the
// current kind, or if the current map is an unmodified JSArray initial map.
base::Optional<MapRef> AsElementsKind(ElementsKind kind) const;
@@ -752,6 +737,7 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
ZoneVector<MapRef>* prototype_maps);
// Concerning the underlying instance_descriptors:
+ DescriptorArrayRef instance_descriptors() const;
MapRef FindFieldOwner(InternalIndex descriptor_index) const;
PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
NameRef GetPropertyKey(InternalIndex descriptor_index) const;
@@ -760,11 +746,7 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
base::Optional<ObjectRef> GetStrongValue(
InternalIndex descriptor_number) const;
- DescriptorArrayRef instance_descriptors() const;
-
- void SerializeRootMap(NotConcurrentInliningTag tag);
- base::Optional<MapRef> FindRootMap() const;
-
+ MapRef FindRootMap() const;
ObjectRef GetConstructor() const;
};
@@ -785,17 +767,10 @@ class FunctionTemplateInfoRef : public HeapObjectRef {
bool is_signature_undefined() const;
bool accept_any_receiver() const;
- // The following returns true if the CallHandlerInfo is present.
- bool has_call_code() const;
-
- void SerializeCallCode(NotConcurrentInliningTag tag);
base::Optional<CallHandlerInfoRef> call_code() const;
ZoneVector<Address> c_functions() const;
ZoneVector<const CFunctionInfo*> c_signatures() const;
-
- HolderLookupResult LookupHolderOfExpectedType(
- MapRef receiver_map,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+ HolderLookupResult LookupHolderOfExpectedType(MapRef receiver_map);
};
class FixedArrayBaseRef : public HeapObjectRef {
@@ -821,12 +796,6 @@ class FixedArrayRef : public FixedArrayBaseRef {
Handle<FixedArray> object() const;
- ObjectRef get(int i) const;
-
- // As above but may fail if Ref construction is not possible (e.g. for
- // serialized types on the background thread).
- // TODO(jgruber): Remove once all Ref types are never-serialized or
- // background-serialized and can thus be created on background threads.
base::Optional<ObjectRef> TryGet(int i) const;
};
@@ -894,15 +863,14 @@ class JSArrayRef : public JSObjectRef {
// storage and {index} is known to be an own data property.
// Note the value returned by this function is only valid if we ensure at
// runtime that the backing store has not changed.
- base::Optional<ObjectRef> GetOwnCowElement(
- FixedArrayBaseRef elements_ref, uint32_t index,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ base::Optional<ObjectRef> GetOwnCowElement(FixedArrayBaseRef elements_ref,
+ uint32_t index) const;
// The `JSArray::length` property; not safe to use in general, but can be
// used in some special cases that guarantee a valid `length` value despite
- // concurrent reads.
- ObjectRef length_unsafe() const;
+ // concurrent reads. The result needs to be optional in case the
+ // return value was created too recently to pass the gc predicate.
+ base::Optional<ObjectRef> length_unsafe() const;
};
class ScopeInfoRef : public HeapObjectRef {
@@ -918,22 +886,23 @@ class ScopeInfoRef : public HeapObjectRef {
ScopeInfoRef OuterScopeInfo() const;
};
-#define BROKER_SFI_FIELDS(V) \
- V(int, internal_formal_parameter_count) \
- V(bool, has_simple_parameters) \
- V(bool, has_duplicate_parameters) \
- V(int, function_map_index) \
- V(FunctionKind, kind) \
- V(LanguageMode, language_mode) \
- V(bool, native) \
- V(bool, HasBreakInfo) \
- V(bool, HasBuiltinId) \
- V(bool, construct_as_builtin) \
- V(bool, HasBytecodeArray) \
- V(int, StartPosition) \
- V(bool, is_compiled) \
- V(bool, IsUserJavaScript) \
- IF_WASM(V, const wasm::WasmModule*, wasm_module) \
+#define BROKER_SFI_FIELDS(V) \
+ V(int, internal_formal_parameter_count_without_receiver) \
+ V(bool, IsDontAdaptArguments) \
+ V(bool, has_simple_parameters) \
+ V(bool, has_duplicate_parameters) \
+ V(int, function_map_index) \
+ V(FunctionKind, kind) \
+ V(LanguageMode, language_mode) \
+ V(bool, native) \
+ V(bool, HasBreakInfo) \
+ V(bool, HasBuiltinId) \
+ V(bool, construct_as_builtin) \
+ V(bool, HasBytecodeArray) \
+ V(int, StartPosition) \
+ V(bool, is_compiled) \
+ V(bool, IsUserJavaScript) \
+ IF_WASM(V, const wasm::WasmModule*, wasm_module) \
IF_WASM(V, const wasm::FunctionSig*, wasm_function_signature)
class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
@@ -966,9 +935,7 @@ class StringRef : public NameRef {
// With concurrent inlining on, we return base::nullopt due to not being able
// to use LookupIterator in a thread-safe way.
- base::Optional<ObjectRef> GetCharAsStringOrUndefined(
- uint32_t index, SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ base::Optional<ObjectRef> GetCharAsStringOrUndefined(uint32_t index) const;
// When concurrently accessing non-read-only non-supported strings, we return
// base::nullopt for these methods.
@@ -1002,10 +969,6 @@ class JSTypedArrayRef : public JSObjectRef {
bool is_on_heap() const;
size_t length() const;
void* data_ptr() const;
-
- void Serialize(NotConcurrentInliningTag tag);
- bool serialized() const;
-
HeapObjectRef buffer() const;
};
@@ -1042,9 +1005,7 @@ class JSGlobalObjectRef : public JSObjectRef {
bool IsDetachedFrom(JSGlobalProxyRef const& proxy) const;
// Can be called even when there is no property cell for the given name.
- base::Optional<PropertyCellRef> GetPropertyCell(
- NameRef const& name, SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name) const;
};
class JSGlobalProxyRef : public JSObjectRef {
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 28eb30969c..00930998dd 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -944,29 +944,31 @@ void Int64Lowering::LowerNode(Node* node) {
}
case IrOpcode::kWord64AtomicLoad: {
DCHECK_EQ(4, node->InputCount());
- MachineType type = AtomicOpType(node->op());
+ AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
DefaultLowering(node, true);
- if (type == MachineType::Uint64()) {
- NodeProperties::ChangeOp(node, machine()->Word32AtomicPairLoad());
+ if (params.representation() == MachineType::Uint64()) {
+ NodeProperties::ChangeOp(
+ node, machine()->Word32AtomicPairLoad(params.order()));
ReplaceNodeWithProjections(node);
} else {
- NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(type));
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(params));
ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
}
break;
}
case IrOpcode::kWord64AtomicStore: {
DCHECK_EQ(5, node->InputCount());
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- if (rep == MachineRepresentation::kWord64) {
+ AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+ if (params.representation() == MachineRepresentation::kWord64) {
LowerMemoryBaseAndIndex(node);
Node* value = node->InputAt(2);
node->ReplaceInput(2, GetReplacementLow(value));
node->InsertInput(zone(), 3, GetReplacementHigh(value));
- NodeProperties::ChangeOp(node, machine()->Word32AtomicPairStore());
+ NodeProperties::ChangeOp(
+ node, machine()->Word32AtomicPairStore(params.order()));
} else {
DefaultLowering(node, true);
- NodeProperties::ChangeOp(node, machine()->Word32AtomicStore(rep));
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicStore(params));
}
break;
}
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 3dcdc6a33e..91197ead1e 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -728,8 +728,7 @@ class IteratingArrayBuiltinReducerAssembler : public JSCallReducerAssembler {
TNode<HeapObject> elements =
LoadField<HeapObject>(AccessBuilder::ForJSObjectElements(), o);
TNode<Object> value = LoadElement<Object>(
- AccessBuilder::ForFixedArrayElement(kind, LoadSensitivity::kCritical),
- elements, index);
+ AccessBuilder::ForFixedArrayElement(kind), elements, index);
return std::make_pair(index, value);
}
@@ -2099,7 +2098,8 @@ FrameState CreateArtificialFrameState(
FrameState PromiseConstructorFrameState(
const PromiseCtorFrameStateParams& params, CommonOperatorBuilder* common,
Graph* graph) {
- DCHECK_EQ(1, params.shared.internal_formal_parameter_count());
+ DCHECK_EQ(1,
+ params.shared.internal_formal_parameter_count_without_receiver());
return CreateArtificialFrameState(
params.node_ptr, params.outer_frame_state, 1,
BytecodeOffset::ConstructStubInvoke(), FrameStateType::kConstructStub,
@@ -3639,8 +3639,6 @@ Reduction JSCallReducer::ReduceCallApiFunction(
FunctionTemplateInfoRef function_template_info(
shared.function_template_info().value());
- if (!function_template_info.has_call_code()) return NoChange();
-
if (function_template_info.accept_any_receiver() &&
function_template_info.is_signature_undefined()) {
// We might be able to
@@ -3764,7 +3762,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
node->ReplaceInput(1, jsgraph()->Constant(function_template_info));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(JSParameterCount(argc)));
node->ReplaceInput(3, receiver); // Update receiver input.
node->ReplaceInput(6 + argc, effect); // Update effect input.
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
@@ -4039,7 +4038,8 @@ JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpreadOfCreateArguments(
return NoChange();
}
formal_parameter_count =
- MakeRef(broker(), shared).internal_formal_parameter_count();
+ MakeRef(broker(), shared)
+ .internal_formal_parameter_count_without_receiver();
}
if (type == CreateArgumentsType::kMappedArguments) {
@@ -4309,13 +4309,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceJSCall(node, function.shared());
} else if (target_ref.IsJSBoundFunction()) {
JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
- base::Optional<JSReceiverRef> bound_target_function =
- function.bound_target_function();
- if (!bound_target_function.has_value()) return NoChange();
- base::Optional<ObjectRef> bound_this = function.bound_this();
- if (!bound_this.has_value()) return NoChange();
+ ObjectRef bound_this = function.bound_this();
ConvertReceiverMode const convert_mode =
- bound_this->IsNullOrUndefined()
+ bound_this.IsNullOrUndefined()
? ConvertReceiverMode::kNullOrUndefined
: ConvertReceiverMode::kNotNullOrUndefined;
@@ -4336,9 +4332,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Patch {node} to use [[BoundTargetFunction]] and [[BoundThis]].
NodeProperties::ReplaceValueInput(
- node, jsgraph()->Constant(*bound_target_function),
+ node, jsgraph()->Constant(function.bound_target_function()),
JSCallNode::TargetIndex());
- NodeProperties::ReplaceValueInput(node, jsgraph()->Constant(*bound_this),
+ NodeProperties::ReplaceValueInput(node, jsgraph()->Constant(bound_this),
JSCallNode::ReceiverIndex());
// Insert the [[BoundArguments]] for {node}.
@@ -4372,13 +4368,13 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceJSCall(node, p.shared_info(broker()));
} else if (target->opcode() == IrOpcode::kCheckClosure) {
FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(target->op()));
- if (cell.shared_function_info().has_value()) {
- return ReduceJSCall(node, *cell.shared_function_info());
- } else {
+ base::Optional<SharedFunctionInfoRef> shared = cell.shared_function_info();
+ if (!shared.has_value()) {
TRACE_BROKER_MISSING(broker(), "Unable to reduce JSCall. FeedbackCell "
<< cell << " has no FeedbackVector");
return NoChange();
}
+ return ReduceJSCall(node, *shared);
}
// If {target} is the result of a JSCreateBoundFunction operation,
@@ -4457,7 +4453,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
} else if (feedback_target.has_value() && feedback_target->IsFeedbackCell()) {
FeedbackCellRef feedback_cell =
MakeRef(broker(), feedback_target.value().AsFeedbackCell().object());
- if (feedback_cell.value().has_value()) {
+ // TODO(neis): This check seems unnecessary.
+ if (feedback_cell.feedback_vector().has_value()) {
// Check that {target} is a closure with given {feedback_cell},
// which uniquely identifies a given function inside a native context.
Node* target_closure = effect =
@@ -5055,9 +5052,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
}
} else if (target_ref.IsJSBoundFunction()) {
JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
- base::Optional<JSReceiverRef> bound_target_function =
- function.bound_target_function();
- if (!bound_target_function.has_value()) return NoChange();
+ JSReceiverRef bound_target_function = function.bound_target_function();
FixedArrayRef bound_arguments = function.bound_arguments();
const int bound_arguments_length = bound_arguments.length();
@@ -5076,20 +5071,20 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Patch {node} to use [[BoundTargetFunction]].
node->ReplaceInput(n.TargetIndex(),
- jsgraph()->Constant(*bound_target_function));
+ jsgraph()->Constant(bound_target_function));
// Patch {node} to use [[BoundTargetFunction]]
// as new.target if {new_target} equals {target}.
if (target == new_target) {
node->ReplaceInput(n.NewTargetIndex(),
- jsgraph()->Constant(*bound_target_function));
+ jsgraph()->Constant(bound_target_function));
} else {
node->ReplaceInput(
n.NewTargetIndex(),
graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
graph()->NewNode(simplified()->ReferenceEqual(),
target, new_target),
- jsgraph()->Constant(*bound_target_function),
+ jsgraph()->Constant(bound_target_function),
new_target));
}
@@ -6373,9 +6368,8 @@ Reduction JSCallReducer::ReduceStringPrototypeStringAt(
index, receiver_length, effect, control);
// Return the character from the {receiver} as single character string.
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
Node* value = effect = graph()->NewNode(string_access_operator, receiver,
- masked_index, effect, control);
+ index, effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -6433,11 +6427,9 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
Node* etrue = effect;
Node* vtrue;
{
- Node* masked_position = graph()->NewNode(
- simplified()->PoisonIndex(), unsigned_position);
Node* string_first = etrue =
graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_position, etrue, if_true);
+ unsigned_position, etrue, if_true);
Node* search_first =
jsgraph()->Constant(str.GetFirstChar().value());
@@ -6488,10 +6480,8 @@ Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) {
index, receiver_length, effect, control);
// Return the character from the {receiver} as single character string.
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
- Node* value = effect =
- graph()->NewNode(simplified()->StringCharCodeAt(), receiver, masked_index,
- effect, control);
+ Node* value = effect = graph()->NewNode(simplified()->StringCharCodeAt(),
+ receiver, index, effect, control);
value = graph()->NewNode(simplified()->StringFromSingleCharCode(), value);
ReplaceWithValue(node, value, effect, control);
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 02e5cb1710..36217ca13b 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -103,7 +103,16 @@ base::Optional<ContextRef> GetSpecializationContext(
Maybe<OuterContext> maybe_outer) {
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
- HeapObjectRef object = MakeRef(broker, HeapConstantOf(node->op()));
+ // TODO(jgruber,chromium:1209798): Using kAssumeMemoryFence works around
+ // the fact that the graph stores handles (and not refs). The assumption
+ // is that any handle inserted into the graph is safe to read; but we
+ // don't preserve the reason why it is safe to read. Thus we must
+ // over-approximate here and assume the existence of a memory fence. In
+ // the future, we should consider having the graph store ObjectRefs or
+ // ObjectData pointer instead, which would make new ref construction here
+ // unnecessary.
+ HeapObjectRef object =
+ MakeRefAssumeMemoryFence(broker, HeapConstantOf(node->op()));
if (object.IsContext()) return object.AsContext();
break;
}
@@ -231,7 +240,16 @@ base::Optional<ContextRef> GetModuleContext(JSHeapBroker* broker, Node* node,
switch (context->opcode()) {
case IrOpcode::kHeapConstant: {
- HeapObjectRef object = MakeRef(broker, HeapConstantOf(context->op()));
+ // TODO(jgruber,chromium:1209798): Using kAssumeMemoryFence works around
+ // the fact that the graph stores handles (and not refs). The assumption
+ // is that any handle inserted into the graph is safe to read; but we
+ // don't preserve the reason why it is safe to read. Thus we must
+ // over-approximate here and assume the existence of a memory fence. In
+ // the future, we should consider having the graph store ObjectRefs or
+ // ObjectData pointer instead, which would make new ref construction here
+ // unnecessary.
+ HeapObjectRef object =
+ MakeRefAssumeMemoryFence(broker, HeapConstantOf(context->op()));
if (object.IsContext()) {
return find_context(object.AsContext());
}
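Both hunks above switch from MakeRef to MakeRefAssumeMemoryFence because, as the TODO explains, the graph stores bare handles and the reason they are safe to read is not preserved; the working assumption is that whoever inserted the constant published the object behind a release store. The standard-C++ shape of that release/acquire publication contract, with no V8 types involved:

#include <atomic>
#include <cassert>
#include <thread>

struct Payload { int value = 0; };

std::atomic<Payload*> published{nullptr};

void Producer() {
  Payload* p = new Payload{42};                   // initialize completely first
  published.store(p, std::memory_order_release);  // then publish behind a fence
}

void Consumer() {
  // "Assume memory fence": the acquire load pairs with the release store, so
  // a non-null pointer guarantees the pointed-to fields are fully visible.
  Payload* p = published.load(std::memory_order_acquire);
  if (p != nullptr) assert(p->value == 42);
}

int main() {
  std::thread a(Producer), b(Consumer);
  a.join();
  b.join();
  // The single Payload is deliberately leaked to keep the sketch short.
}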
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 414977eb7d..60c9017fc2 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -197,11 +197,11 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* const arguments_length =
graph()->NewNode(simplified()->ArgumentsLength());
// Allocate the elements backing store.
- Node* const elements = effect =
- graph()->NewNode(simplified()->NewArgumentsElements(
- CreateArgumentsType::kUnmappedArguments,
- shared.internal_formal_parameter_count()),
- arguments_length, effect);
+ Node* const elements = effect = graph()->NewNode(
+ simplified()->NewArgumentsElements(
+ CreateArgumentsType::kUnmappedArguments,
+ shared.internal_formal_parameter_count_without_receiver()),
+ arguments_length, effect);
// Load the arguments object map.
Node* const arguments_map =
jsgraph()->Constant(native_context().strict_arguments_map());
@@ -222,14 +222,14 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* const arguments_length =
graph()->NewNode(simplified()->ArgumentsLength());
- Node* const rest_length = graph()->NewNode(
- simplified()->RestLength(shared.internal_formal_parameter_count()));
+ Node* const rest_length = graph()->NewNode(simplified()->RestLength(
+ shared.internal_formal_parameter_count_without_receiver()));
// Allocate the elements backing store.
- Node* const elements = effect =
- graph()->NewNode(simplified()->NewArgumentsElements(
- CreateArgumentsType::kRestParameter,
- shared.internal_formal_parameter_count()),
- arguments_length, effect);
+ Node* const elements = effect = graph()->NewNode(
+ simplified()->NewArgumentsElements(
+ CreateArgumentsType::kRestParameter,
+ shared.internal_formal_parameter_count_without_receiver()),
+ arguments_length, effect);
// Load the JSArray object map.
Node* const jsarray_map = jsgraph()->Constant(
native_context().js_array_packed_elements_map());
@@ -332,7 +332,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
return Changed(node);
}
case CreateArgumentsType::kRestParameter: {
- int start_index = shared.internal_formal_parameter_count();
+ int start_index =
+ shared.internal_formal_parameter_count_without_receiver();
// Use inline allocation for all unmapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
Node* effect = NodeProperties::GetEffectInput(node);
@@ -401,7 +402,8 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
// Allocate a register file.
SharedFunctionInfoRef shared = js_function.shared();
DCHECK(shared.HasBytecodeArray());
- int parameter_count_no_receiver = shared.internal_formal_parameter_count();
+ int parameter_count_no_receiver =
+ shared.internal_formal_parameter_count_without_receiver();
int length = parameter_count_no_receiver +
shared.GetBytecodeArray().register_count();
MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
@@ -466,9 +468,10 @@ Reduction JSCreateLowering::ReduceNewArray(
// Constructing an Array via new Array(N) where N is an unsigned
// integer, always creates a holey backing store.
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map,
- initial_map.AsElementsKind(GetHoleyElementsKind(elements_kind)));
+ base::Optional<MapRef> maybe_initial_map =
+ initial_map.AsElementsKind(GetHoleyElementsKind(elements_kind));
+ if (!maybe_initial_map.has_value()) return NoChange();
+ initial_map = maybe_initial_map.value();
// Because CheckBounds performs implicit conversion from string to number, an
// additional CheckNumber is required to behave correctly for calls with a
@@ -525,8 +528,12 @@ Reduction JSCreateLowering::ReduceNewArray(
if (NodeProperties::GetType(length).Max() > 0.0) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map, initial_map.AsElementsKind(elements_kind));
+
+ base::Optional<MapRef> maybe_initial_map =
+ initial_map.AsElementsKind(elements_kind);
+ if (!maybe_initial_map.has_value()) return NoChange();
+ initial_map = maybe_initial_map.value();
+
DCHECK(IsFastElementsKind(elements_kind));
// Setup elements and properties.
@@ -566,8 +573,11 @@ Reduction JSCreateLowering::ReduceNewArray(
// Determine the appropriate elements kind.
DCHECK(IsFastElementsKind(elements_kind));
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map, initial_map.AsElementsKind(elements_kind));
+
+ base::Optional<MapRef> maybe_initial_map =
+ initial_map.AsElementsKind(elements_kind);
+ if (!maybe_initial_map.has_value()) return NoChange();
+ initial_map = maybe_initial_map.value();
// Check {values} based on the {elements_kind}. These checks are guarded
// by the {elements_kind} feedback on the {site}, so it's safe to just
@@ -1479,7 +1489,8 @@ Node* JSCreateLowering::TryAllocateAliasedArguments(
// If there is no aliasing, the arguments object elements are not special in
// any way, we can just return an unmapped backing store instead.
- int parameter_count = shared.internal_formal_parameter_count();
+ int parameter_count =
+ shared.internal_formal_parameter_count_without_receiver();
if (parameter_count == 0) {
return TryAllocateArguments(effect, control, frame_state);
}
@@ -1545,7 +1556,8 @@ Node* JSCreateLowering::TryAllocateAliasedArguments(
const SharedFunctionInfoRef& shared, bool* has_aliased_arguments) {
// If there is no aliasing, the arguments object elements are not
// special in any way, we can just return an unmapped backing store.
- int parameter_count = shared.internal_formal_parameter_count();
+ int parameter_count =
+ shared.internal_formal_parameter_count_without_receiver();
if (parameter_count == 0) {
return graph()->NewNode(
simplified()->NewArgumentsElements(
@@ -1713,7 +1725,6 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
Type::Any(),
MachineType::AnyTagged(),
kFullWriteBarrier,
- LoadSensitivity::kUnsafe,
const_field_info};
// Note: the use of RawInobjectPropertyAt (vs. the higher-level
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index bbc47e45ad..08896e3f11 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -586,7 +586,7 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
// between top of stack and JS arguments.
DCHECK_EQ(interface_descriptor.GetStackParameterCount(), 0);
Node* stub_code = jsgraph()->ArrayConstructorStubConstant();
- Node* stub_arity = jsgraph()->Int32Constant(arity);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arity));
base::Optional<AllocationSiteRef> const site = p.site(broker());
Node* type_info = site.has_value() ? jsgraph()->Constant(site.value())
: jsgraph()->UndefinedConstant();
@@ -820,7 +820,7 @@ void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
Node* start_index = jsgraph()->Uint32Constant(p.start_index());
Node* receiver = jsgraph()->UndefinedConstant();
node->InsertInput(zone(), 0, stub_code);
@@ -843,7 +843,7 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
Node* receiver = jsgraph()->UndefinedConstant();
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(zone(), 0, stub_code);
@@ -906,7 +906,8 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
Node* stub_code = jsgraph()->HeapConstant(callable.code());
// We pass the spread in a register, not on the stack.
- Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* stub_arity =
+ jsgraph()->Int32Constant(JSParameterCount(arg_count - kTheSpread));
Node* receiver = jsgraph()->UndefinedConstant();
DCHECK(n.FeedbackVectorIndex() > n.LastArgumentIndex());
node->RemoveInput(n.FeedbackVectorIndex());
@@ -930,7 +931,7 @@ void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
Node* start_index = jsgraph()->Uint32Constant(p.start_index());
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
@@ -951,7 +952,7 @@ void JSGenericLowering::LowerJSCall(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
@@ -1009,7 +1010,8 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
Node* stub_code = jsgraph()->HeapConstant(callable.code());
// We pass the spread in a register, not on the stack.
- Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* stub_arity =
+ jsgraph()->Int32Constant(JSParameterCount(arg_count - kTheSpread));
// Shuffling inputs.
// Before: {target, receiver, ...args, spread, vector}.
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index dc34bcae6d..0007a582a0 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -50,12 +50,10 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
array_and_object_prototypes_(zone()),
tracing_enabled_(tracing_enabled),
is_concurrent_inlining_(is_concurrent_inlining),
- is_isolate_bootstrapping_(isolate->bootstrapper()->IsActive()),
code_kind_(code_kind),
feedback_(zone()),
property_access_infos_(zone()),
- minimorphic_property_access_infos_(zone()),
- typed_array_string_tags_(zone()) {
+ minimorphic_property_access_infos_(zone()) {
// Note that this initialization of {refs_} with the minimal initial capacity
// is redundant in the normal use case (concurrent compilation enabled,
// standard objects to be serialized), as the map is going to be replaced
@@ -220,20 +218,6 @@ bool JSHeapBroker::ObjectMayBeUninitialized(HeapObject object) const {
return !IsMainThread() && isolate()->heap()->IsPendingAllocation(object);
}
-bool CanInlineElementAccess(MapRef const& map) {
- if (!map.IsJSObjectMap()) return false;
- if (map.is_access_check_needed()) return false;
- if (map.has_indexed_interceptor()) return false;
- ElementsKind const elements_kind = map.elements_kind();
- if (IsFastElementsKind(elements_kind)) return true;
- if (IsTypedArrayElementsKind(elements_kind) &&
- elements_kind != BIGUINT64_ELEMENTS &&
- elements_kind != BIGINT64_ELEMENTS) {
- return true;
- }
- return false;
-}
-
ProcessedFeedback::ProcessedFeedback(Kind kind, FeedbackSlotKind slot_kind)
: kind_(kind), slot_kind_(slot_kind) {}
@@ -423,7 +407,10 @@ ElementAccessFeedback::ElementAccessFeedback(Zone* zone,
bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
for (auto const& group : transition_groups()) {
for (Handle<Map> map : group) {
- if (!MakeRef(broker, map).IsStringMap()) return false;
+ // We assume a memory fence because {map} was read earlier from
+ // the feedback vector and was store ordered on insertion into the
+ // vector.
+ if (!MakeRefAssumeMemoryFence(broker, map).IsStringMap()) return false;
}
}
return true;
@@ -880,11 +867,7 @@ ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess(
MapHandles possible_transition_targets;
possible_transition_targets.reserve(maps.size());
for (MapRef& map : maps) {
- if (!is_concurrent_inlining()) {
- map.SerializeRootMap(NotConcurrentInliningTag{this});
- }
-
- if (CanInlineElementAccess(map) &&
+ if (map.CanInlineElementAccess() &&
IsFastElementsKind(map.elements_kind()) &&
GetInitialFastElementsKind() != map.elements_kind()) {
possible_transition_targets.push_back(map.object());
@@ -992,9 +975,13 @@ MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessInfo access_info =
factory.ComputePropertyAccessInfo(feedback);
if (is_concurrent_inlining_) {
+ // We can assume a memory fence on {source.vector} because in production,
+ // the vector has already passed the gc predicate. Unit tests create
+ // FeedbackSource objects directly from handles, but they run on
+ // the main thread.
TRACE(this, "Storing MinimorphicLoadPropertyAccessInfo for "
<< source.index() << " "
- << MakeRef<Object>(this, source.vector));
+ << MakeRefAssumeMemoryFence<Object>(this, source.vector));
minimorphic_property_access_infos_.insert({source, access_info});
}
return access_info;
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 91b94bebb5..bf9b9aaac0 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -117,7 +117,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Zone* zone() const { return zone_; }
bool tracing_enabled() const { return tracing_enabled_; }
bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
- bool is_isolate_bootstrapping() const { return is_isolate_bootstrapping_; }
bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
NexusConfig feedback_nexus_config() const {
@@ -173,7 +172,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ProcessedFeedback const* feedback);
FeedbackSlotKind GetFeedbackSlotKind(FeedbackSource const& source) const;
- // TODO(neis): Move these into serializer when we're always in the background.
ElementAccessFeedback const& ProcessFeedbackMapsForElementAccess(
ZoneVector<MapRef>& maps, KeyedAccessMode const& keyed_mode,
FeedbackSlotKind slot_kind);
@@ -291,8 +289,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
void IncrementTracingIndentation();
void DecrementTracingIndentation();
- RootIndexMap const& root_index_map() { return root_index_map_; }
-
// Locks {mutex} through the duration of this scope iff it is the first
// occurrence. This is done to have a recursive shared lock on {mutex}.
class V8_NODISCARD RecursiveSharedMutexGuardIfNeeded {
@@ -389,8 +385,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
void CollectArrayAndObjectPrototypes();
- PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; }
-
void set_persistent_handles(
std::unique_ptr<PersistentHandles> persistent_handles) {
DCHECK_NULL(ph_);
@@ -419,7 +413,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
std::unique_ptr<CanonicalHandlesMap> canonical_handles);
Isolate* const isolate_;
- Zone* const zone_ = nullptr;
+ Zone* const zone_;
base::Optional<NativeContextRef> target_native_context_;
RefsMap* refs_;
RootIndexMap root_index_map_;
@@ -429,13 +423,11 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
BrokerMode mode_ = kDisabled;
bool const tracing_enabled_;
bool const is_concurrent_inlining_;
- bool const is_isolate_bootstrapping_;
CodeKind const code_kind_;
std::unique_ptr<PersistentHandles> ph_;
LocalIsolate* local_isolate_ = nullptr;
std::unique_ptr<CanonicalHandlesMap> canonical_handles_;
unsigned trace_indentation_ = 0;
- PerIsolateCompilerCache* compiler_cache_ = nullptr;
ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*,
FeedbackSource::Hash, FeedbackSource::Equal>
feedback_;
@@ -446,8 +438,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
FeedbackSource::Hash, FeedbackSource::Equal>
minimorphic_property_access_infos_;
- ZoneVector<ObjectData*> typed_array_string_tags_;
-
CompilationDependencies* dependencies_ = nullptr;
// The MapUpdater mutex is used in recursive patterns; for example,
@@ -460,7 +450,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
// Likewise for boilerplate migrations.
int boilerplate_migration_mutex_depth_ = 0;
- static constexpr size_t kMaxSerializedFunctionsCacheSize = 200;
static constexpr uint32_t kMinimalRefsBucketCount = 8;
STATIC_ASSERT(base::bits::IsPowerOfTwo(kMinimalRefsBucketCount));
static constexpr uint32_t kInitialRefsBucketCount = 1024;
@@ -487,21 +476,6 @@ class V8_NODISCARD TraceScope {
JSHeapBroker* const broker_;
};
-#define ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(something_var, \
- optionally_something) \
- auto optionally_something_ = optionally_something; \
- if (!optionally_something_) \
- return NoChangeBecauseOfMissingData(broker(), __FUNCTION__, __LINE__); \
- something_var = *optionally_something_;
-
-class Reduction;
-Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
- const char* function, int line);
-
-// Miscellaneous definitions that should be moved elsewhere once concurrent
-// compilation is finished.
-bool CanInlineElementAccess(MapRef const& map);
-
// Scope that unparks the LocalHeap, if:
// a) We have a JSHeapBroker,
// b) Said JSHeapBroker has a LocalIsolate and thus a LocalHeap,
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 177f35c7a0..c6a223b600 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -27,8 +27,40 @@ bool IsSmall(int const size) {
}
bool CanConsiderForInlining(JSHeapBroker* broker,
- SharedFunctionInfoRef const& shared,
- FeedbackVectorRef const& feedback_vector) {
+ FeedbackCellRef const& feedback_cell) {
+ base::Optional<FeedbackVectorRef> feedback_vector =
+ feedback_cell.feedback_vector();
+ if (!feedback_vector.has_value()) {
+ TRACE("Cannot consider " << feedback_cell
+ << " for inlining (no feedback vector)");
+ return false;
+ }
+ SharedFunctionInfoRef shared = feedback_vector->shared_function_info();
+
+ if (!shared.HasBytecodeArray()) {
+ TRACE("Cannot consider " << shared << " for inlining (no bytecode)");
+ return false;
+ }
+ // Ensure we have a persistent handle to the bytecode in order to avoid
+ // flushing it during the remaining compilation.
+ shared.GetBytecodeArray();
+
+ // Read feedback vector again in case it got flushed before we were able to
+ // prevent flushing above.
+ base::Optional<FeedbackVectorRef> feedback_vector_again =
+ feedback_cell.feedback_vector();
+ if (!feedback_vector_again.has_value()) {
+ TRACE("Cannot consider " << shared << " for inlining (no feedback vector)");
+ return false;
+ }
+ if (!feedback_vector_again->equals(*feedback_vector)) {
+ // The new feedback vector likely contains lots of uninitialized slots, so
+ // it doesn't make much sense to inline this function now.
+ TRACE("Not considering " << shared
+ << " for inlining (feedback vector changed)");
+ return false;
+ }
+
SharedFunctionInfo::Inlineability inlineability = shared.GetInlineability();
if (inlineability != SharedFunctionInfo::kIsInlineable) {
TRACE("Cannot consider "
@@ -36,22 +68,20 @@ bool CanConsiderForInlining(JSHeapBroker* broker,
return false;
}
- DCHECK(shared.HasBytecodeArray());
- TRACE("Considering " << shared << " for inlining with " << feedback_vector);
+ TRACE("Considering " << shared << " for inlining with " << *feedback_vector);
return true;
}
bool CanConsiderForInlining(JSHeapBroker* broker,
JSFunctionRef const& function) {
- if (!function.has_feedback_vector(broker->dependencies())) {
- TRACE("Cannot consider " << function
- << " for inlining (no feedback vector)");
- return false;
- }
-
- return CanConsiderForInlining(
- broker, function.shared(),
- function.feedback_vector(broker->dependencies()));
+ FeedbackCellRef feedback_cell =
+ function.raw_feedback_cell(broker->dependencies());
+ bool const result = CanConsiderForInlining(broker, feedback_cell);
+ if (result) {
+ CHECK(
+ function.shared().equals(feedback_cell.shared_function_info().value()));
+ }
+ return result;
}
} // namespace
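The reworked CanConsiderForInlining above reads the feedback vector, pins the bytecode via a persistent handle so it cannot be flushed, then re-reads the vector and only proceeds if both reads agree. A compact, purely illustrative version of that read-pin-reread idiom over a value another thread may swap:

#include <atomic>
#include <optional>

// Illustrative only: `slot` stands in for the feedback cell, whose contents a
// concurrent thread may swap out (e.g. after bytecode flushing). std::atomic
// replaces the heap reads; nothing here is broker API.
std::atomic<int> slot{7};

void PinCurrentValue(int /*value*/) {
  // Stand-in for "take a persistent handle to the bytecode": after this call,
  // the state we observed can no longer be reclaimed underneath us.
}

std::optional<int> ReadStableValue() {
  int first = slot.load(std::memory_order_acquire);   // first read
  PinCurrentValue(first);                             // prevent further flushing
  int second = slot.load(std::memory_order_acquire);  // read again
  if (first != second) return std::nullopt;           // changed in between: give up
  return first;                                       // stable enough to inline against
}

int main() { return ReadStableValue().has_value() ? 0 : 1; }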
@@ -65,8 +95,8 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
HeapObjectMatcher m(callee);
if (m.HasResolvedValue() && m.Ref(broker()).IsJSFunction()) {
- out.functions[0] = m.Ref(broker()).AsJSFunction();
- JSFunctionRef function = out.functions[0].value();
+ JSFunctionRef function = m.Ref(broker()).AsJSFunction();
+ out.functions[0] = function;
if (CanConsiderForInlining(broker(), function)) {
out.bytecode[0] = function.shared().GetBytecodeArray();
out.num_functions = 1;
@@ -98,10 +128,9 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
if (m.IsCheckClosure()) {
DCHECK(!out.functions[0].has_value());
FeedbackCellRef feedback_cell = MakeRef(broker(), FeedbackCellOf(m.op()));
- SharedFunctionInfoRef shared_info = *feedback_cell.shared_function_info();
- out.shared_info = shared_info;
- if (CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
- out.bytecode[0] = shared_info.GetBytecodeArray();
+ if (CanConsiderForInlining(broker(), feedback_cell)) {
+ out.shared_info = feedback_cell.shared_function_info().value();
+ out.bytecode[0] = out.shared_info->GetBytecodeArray();
}
out.num_functions = 1;
return out;
@@ -109,13 +138,11 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
if (m.IsJSCreateClosure()) {
DCHECK(!out.functions[0].has_value());
JSCreateClosureNode n(callee);
- CreateClosureParameters const& p = n.Parameters();
FeedbackCellRef feedback_cell = n.GetFeedbackCellRefChecked(broker());
- SharedFunctionInfoRef shared_info = p.shared_info(broker());
- out.shared_info = shared_info;
- if (feedback_cell.value().has_value() &&
- CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
- out.bytecode[0] = shared_info.GetBytecodeArray();
+ if (CanConsiderForInlining(broker(), feedback_cell)) {
+ out.shared_info = feedback_cell.shared_function_info().value();
+ out.bytecode[0] = out.shared_info->GetBytecodeArray();
+ CHECK(out.shared_info->equals(n.Parameters().shared_info(broker())));
}
out.num_functions = 1;
return out;
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index a17a43ecd2..deb8345bf7 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -305,7 +305,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// The function might have not been called yet.
- if (!function.has_feedback_vector(broker()->dependencies())) {
+ if (!function.feedback_vector(broker()->dependencies()).has_value()) {
return base::nullopt;
}
@@ -355,7 +355,7 @@ FeedbackCellRef JSInliner::DetermineCallContext(Node* node,
if (match.HasResolvedValue() && match.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// This was already ensured by DetermineCallTarget
- CHECK(function.has_feedback_vector(broker()->dependencies()));
+ CHECK(function.feedback_vector(broker()->dependencies()).has_value());
// The inlinee specializes to the context from the JSFunction object.
*context_out = jsgraph()->Constant(function.context());
@@ -709,7 +709,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// Insert argument adaptor frame if required. The callees formal parameter
// count have to match the number of arguments passed
// to the call.
- int parameter_count = shared_info->internal_formal_parameter_count();
+ int parameter_count =
+ shared_info->internal_formal_parameter_count_without_receiver();
DCHECK_EQ(parameter_count, start.FormalParameterCountWithoutReceiver());
if (call.argument_count() != parameter_count) {
frame_state = CreateArtificialFrameState(
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index e03e0d41a3..cdbc4848cc 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -230,8 +230,9 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter(
broker(),
FrameStateInfoOf(frame_state->op()).shared_info().ToHandleChecked());
DCHECK(shared.is_compiled());
- int register_count = shared.internal_formal_parameter_count() +
- shared.GetBytecodeArray().register_count();
+ int register_count =
+ shared.internal_formal_parameter_count_without_receiver() +
+ shared.GetBytecodeArray().register_count();
MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
AllocationBuilder ab(jsgraph(), effect, control);
if (!ab.CanAllocateArray(register_count, fixed_array_map)) {
@@ -617,15 +618,11 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// OrdinaryHasInstance on bound functions turns into a recursive invocation
// of the instanceof operator again.
JSBoundFunctionRef function = m.Ref(broker()).AsJSBoundFunction();
- base::Optional<JSReceiverRef> bound_target_function =
- function.bound_target_function();
- if (bound_target_function.has_value()) return NoChange();
-
Node* feedback = jsgraph()->UndefinedConstant();
NodeProperties::ReplaceValueInput(node, object,
JSInstanceOfNode::LeftIndex());
NodeProperties::ReplaceValueInput(
- node, jsgraph()->Constant(*bound_target_function),
+ node, jsgraph()->Constant(function.bound_target_function()),
JSInstanceOfNode::RightIndex());
node->InsertInput(zone(), JSInstanceOfNode::FeedbackVectorIndex(),
feedback);
@@ -970,6 +967,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
break;
}
case PropertyCellType::kUndefined:
+ case PropertyCellType::kInTransition:
UNREACHABLE();
}
}
@@ -1635,8 +1633,7 @@ void JSNativeContextSpecialization::RemoveImpossibleMaps(
maps->erase(std::remove_if(maps->begin(), maps->end(),
[root_map](const MapRef& map) {
return map.is_abandoned_prototype_map() ||
- (map.FindRootMap().has_value() &&
- !map.FindRootMap()->equals(*root_map));
+ !map.FindRootMap().equals(*root_map);
}),
maps->end());
}
@@ -1747,14 +1744,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
}
- // Check if we have the necessary data for building element accesses.
for (ElementAccessInfo const& access_info : access_infos) {
if (!IsTypedArrayElementsKind(access_info.elements_kind())) continue;
- base::Optional<JSTypedArrayRef> typed_array =
- GetTypedArrayConstant(broker(), receiver);
- if (typed_array.has_value() && !typed_array->serialized()) {
- return NoChange();
- }
}
// Check for the monomorphic case.
@@ -2256,10 +2247,6 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
Node* JSNativeContextSpecialization::InlineApiCall(
Node* receiver, Node* holder, Node* frame_state, Node* value, Node** effect,
Node** control, FunctionTemplateInfoRef const& function_template_info) {
- if (!function_template_info.has_call_code()) {
- return nullptr;
- }
-
if (!function_template_info.call_code().has_value()) {
TRACE_BROKER_MISSING(broker(), "call code for function template info "
<< function_template_info);
@@ -2449,7 +2436,6 @@ JSNativeContextSpecialization::BuildPropertyStore(
field_type,
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
- LoadSensitivity::kUnsafe,
access_info.GetConstFieldInfo(),
access_mode == AccessMode::kStoreInLiteral};
@@ -2483,7 +2469,6 @@ JSNativeContextSpecialization::BuildPropertyStore(
Type::OtherInternal(),
MachineType::TaggedPointer(),
kPointerWriteBarrier,
- LoadSensitivity::kUnsafe,
access_info.GetConstFieldInfo(),
access_mode == AccessMode::kStoreInLiteral};
storage = effect =
@@ -2789,10 +2774,8 @@ JSNativeContextSpecialization::BuildElementAccess(
if (situation == kHandleOOB_SmiCheckDone) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(
- common()->Branch(BranchHint::kTrue,
- IsSafetyCheck::kCriticalSafetyCheck),
- check, control);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
@@ -2980,10 +2963,9 @@ JSNativeContextSpecialization::BuildElementAccess(
element_type = Type::SignedSmall();
element_machine_type = MachineType::TaggedSigned();
}
- ElementAccess element_access = {
- kTaggedBase, FixedArray::kHeaderSize,
- element_type, element_machine_type,
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
+ element_type, element_machine_type,
+ kFullWriteBarrier};
// Access the actual element.
if (keyed_mode.access_mode() == AccessMode::kLoad) {
@@ -3003,10 +2985,8 @@ JSNativeContextSpecialization::BuildElementAccess(
CanTreatHoleAsUndefined(receiver_maps)) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(
- common()->Branch(BranchHint::kTrue,
- IsSafetyCheck::kCriticalSafetyCheck),
- check, control);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
@@ -3289,9 +3269,7 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue,
- IsSafetyCheck::kCriticalSafetyCheck),
- check, *control);
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, *control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
// Do a real bounds check against {length}. This is in order to protect
@@ -3302,10 +3280,8 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
CheckBoundsFlag::kConvertStringAndMinusZero |
CheckBoundsFlag::kAbortOnOutOfBounds),
index, length, *effect, if_true);
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
- Node* vtrue = etrue =
- graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_index, etrue, if_true);
+ Node* vtrue = etrue = graph()->NewNode(simplified()->StringCharCodeAt(),
+ receiver, index, etrue, if_true);
vtrue = graph()->NewNode(simplified()->StringFromSingleCharCode(), vtrue);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -3323,12 +3299,9 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
CheckBoundsFlag::kConvertStringAndMinusZero),
index, length, *effect, *control);
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
-
// Return the character from the {receiver} as single character string.
- Node* value = *effect =
- graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_index, *effect, *control);
+ Node* value = *effect = graph()->NewNode(
+ simplified()->StringCharCodeAt(), receiver, index, *effect, *control);
value = graph()->NewNode(simplified()->StringFromSingleCharCode(), value);
return value;
}
@@ -3465,10 +3438,7 @@ base::Optional<MapRef> JSNativeContextSpecialization::InferRootMap(
base::Optional<MapRef> initial_map =
NodeProperties::GetJSCreateMap(broker(), object);
if (initial_map.has_value()) {
- if (!initial_map->FindRootMap().has_value()) {
- return base::nullopt;
- }
- DCHECK(initial_map->equals(*initial_map->FindRootMap()));
+ DCHECK(initial_map->equals(initial_map->FindRootMap()));
return *initial_map;
}
}
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index e986ef1baf..8d67e41751 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -998,9 +998,9 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
HeapObjectMatcher m(input);
if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
- double number;
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
- return Replace(jsgraph()->Constant(number));
+ base::Optional<double> number = input_value.ToNumber();
+ if (!number.has_value()) return NoChange();
+ return Replace(jsgraph()->Constant(number.value()));
}
}
if (input_type.IsHeapConstant()) {
@@ -1595,7 +1595,8 @@ Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
Callable callable = CodeFactory::ConstructFunctionForwardVarargs(isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
node->InsertInput(graph()->zone(), 4, jsgraph()->Constant(start_index));
node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(
@@ -1633,7 +1634,8 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
STATIC_ASSERT(JSConstructNode::NewTargetIndex() == 1);
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(graph()->zone(), 0, jsgraph()->Constant(code));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(
@@ -1663,7 +1665,8 @@ Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
Callable callable = CodeFactory::CallFunctionForwardVarargs(isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(JSParameterCount(arity)));
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(start_index));
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
@@ -1750,8 +1753,11 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
Node* new_target = jsgraph()->UndefinedConstant();
- int formal_count = shared->internal_formal_parameter_count();
- if (formal_count != kDontAdaptArgumentsSentinel && formal_count > arity) {
+ int formal_count =
+ shared->internal_formal_parameter_count_without_receiver();
+ // TODO(v8:11112): Once the sentinel is always 0, the check against
+ // IsDontAdaptArguments() can be removed.
+ if (!shared->IsDontAdaptArguments() && formal_count > arity) {
node->RemoveInput(n.FeedbackVectorIndex());
// Underapplication. Massage the arguments to match the expected number of
// arguments.
@@ -1763,7 +1769,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
// Patch {node} to a direct call.
node->InsertInput(graph()->zone(), formal_count + 2, new_target);
node->InsertInput(graph()->zone(), formal_count + 3,
- jsgraph()->Constant(arity));
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(node,
common()->Call(Linkage::GetJSCallDescriptor(
graph()->zone(), false, 1 + formal_count,
@@ -1786,13 +1792,15 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(graph()->zone(), 0, stub_code); // Code object.
node->InsertInput(graph()->zone(), 2, new_target);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
// Patch {node} to a direct call.
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(graph()->zone(), arity + 2, new_target);
- node->InsertInput(graph()->zone(), arity + 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), arity + 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(node,
common()->Call(Linkage::GetJSCallDescriptor(
graph()->zone(), false, 1 + arity,
@@ -1811,7 +1819,8 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
Callable callable = CodeFactory::CallFunction(isolate(), convert_mode);
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
graph()->zone(), callable.descriptor(), 1 + arity, flags)));
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index fac24e802d..fec0040b61 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -219,9 +219,10 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone,
// If we are compiling a JS function, use a JS call descriptor,
// plus the receiver.
SharedFunctionInfo shared = info->closure()->shared();
- return GetJSCallDescriptor(zone, info->is_osr(),
- 1 + shared.internal_formal_parameter_count(),
- CallDescriptor::kCanUseRoots);
+ return GetJSCallDescriptor(
+ zone, info->is_osr(),
+ shared.internal_formal_parameter_count_with_receiver(),
+ CallDescriptor::kCanUseRoots);
}
return nullptr; // TODO(titzer): ?
}
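
The repeated substitutions above, where internal_formal_parameter_count() becomes either the _without_receiver() or the _with_receiver() accessor and raw arity constants get wrapped in JSParameterCount(arity), all encode one convention: counts that feed call descriptors now state explicitly whether the implicit receiver slot is included. A minimal sketch of the presumed relationship follows; the accessor and helper names come from the diff itself, but the bodies are illustrative and assume the descriptor convention counts exactly one receiver slot.

// Illustrative only, not the V8 definitions. If the build's convention
// counted zero receiver slots, kReceiverSlots would be 0 and the helper
// would be the identity.
constexpr int kReceiverSlots = 1;

constexpr int JSParameterCountSketch(int explicit_argument_count) {
  return explicit_argument_count + kReceiverSlots;
}

// Relation implied by the linkage.cc hunk above, which replaces
// 1 + internal_formal_parameter_count() with the _with_receiver() form:
//   internal_formal_parameter_count_with_receiver()
//       == internal_formal_parameter_count_without_receiver() + 1
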
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 8b33444b29..707c7d98ab 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -214,15 +214,13 @@ class V8_EXPORT_PRIVATE CallDescriptor final
kInitializeRootRegister = 1u << 3,
// Does not ever try to allocate space on our heap.
kNoAllocate = 1u << 4,
- // Use retpoline for this call if indirect.
- kRetpoline = 1u << 5,
// Use the kJavaScriptCallCodeStartRegister (fixed) register for the
// indirect target address when calling.
- kFixedTargetRegister = 1u << 6,
- kCallerSavedRegisters = 1u << 7,
+ kFixedTargetRegister = 1u << 5,
+ kCallerSavedRegisters = 1u << 6,
// The kCallerSavedFPRegisters only matters (and is set) when the more general
// flag for kCallerSavedRegisters above is also set.
- kCallerSavedFPRegisters = 1u << 8,
+ kCallerSavedFPRegisters = 1u << 7,
// Tail calls for tier up are special (in fact they are different enough
// from normal tail calls to warrant a dedicated opcode; but they also have
// enough similar aspects that reusing the TailCall opcode is pragmatic).
@@ -238,15 +236,15 @@ class V8_EXPORT_PRIVATE CallDescriptor final
//
// In other words, behavior is identical to a jmp instruction prior to caller
// frame construction.
- kIsTailCallForTierUp = 1u << 9,
+ kIsTailCallForTierUp = 1u << 8,
+
+ // AIX has a function descriptor by default but it can be disabled for a
+ // certain CFunction call (only used for Kind::kCallAddress).
+ kNoFunctionDescriptor = 1u << 9,
// Flags past here are *not* encoded in InstructionCode and are thus not
// accessible from the code generator. See also
// kFlagsBitsEncodedInInstructionCode.
-
- // AIX has a function descriptor by default but it can be disabled for a
- // certain CFunction call (only used for Kind::kCallAddress).
- kNoFunctionDescriptor = 1u << 10,
};
using Flags = base::Flags<Flag>;
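
With kRetpoline gone, the remaining CallDescriptor flags simply pack into consecutive bits, and call sites keep combining them through base::Flags as before; only code that persisted raw flag values would notice the renumbering. A small usage sketch, with flag names taken from the enum above and the particular combination made up for illustration:

// Illustrative combination under the new bit assignments.
CallDescriptor::Flags flags =
    CallDescriptor::kNeedsFrameState |      // declared earlier in the enum, untouched here
    CallDescriptor::kFixedTargetRegister |  // now 1u << 5 (was 1u << 6)
    CallDescriptor::kNoFunctionDescriptor;  // now 1u << 9 (was 1u << 10)
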
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index e184534ed7..7b660856b7 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -5,12 +5,17 @@
#include "src/compiler/loop-analysis.h"
#include "src/codegen/tick-counter.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/zone/zone.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-code-manager.h"
+#endif
+
namespace v8 {
namespace internal {
@@ -581,12 +586,24 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindSmallUnnestedLoopFromHeader(
loop_header);
// All uses are outside the loop, do nothing.
break;
- case IrOpcode::kCall:
case IrOpcode::kTailCall:
case IrOpcode::kJSWasmCall:
case IrOpcode::kJSCall:
// Call nodes are considered to have unbounded size, i.e. >max_size.
+ // An exception is the call to the stack guard builtin at the beginning
+ // of many loops.
return nullptr;
+ case IrOpcode::kCall: {
+ Node* callee = node->InputAt(0);
+ if (callee->opcode() == IrOpcode::kRelocatableInt32Constant ||
+ callee->opcode() == IrOpcode::kRelocatableInt64Constant) {
+ auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
+ if (info.value() != v8::internal::wasm::WasmCode::kWasmStackGuard) {
+ return nullptr;
+ }
+ }
+ V8_FALLTHROUGH;
+ }
default:
for (Node* use : node->uses()) {
if (visited->count(use) == 0) queue.push_back(use);
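
The restructured kCall case above is easy to misread because of the nested check plus V8_FALLTHROUGH, so here is the decision it implements as a small predicate; this is a descriptive restatement of the code just shown, not new behavior.

// True when the call disqualifies the loop from being treated as small:
// a callee that is a relocatable constant pointing at anything other than
// the wasm stack guard. Stack-guard calls, and callees that are not
// relocatable constants, fall through to the default handling above.
bool CallDisqualifiesSmallLoop(Node* call) {
  Node* callee = call->InputAt(0);
  if (callee->opcode() != IrOpcode::kRelocatableInt32Constant &&
      callee->opcode() != IrOpcode::kRelocatableInt64Constant) {
    return false;
  }
  auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
  return info.value() != v8::internal::wasm::WasmCode::kWasmStackGuard;
}
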
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 88679283d9..fedb208b5f 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -121,10 +121,14 @@ class MachineRepresentationInferrer {
break;
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord64AtomicLoad:
+ representation_vector_[node->id()] =
+ PromoteRepresentation(AtomicLoadParametersOf(node->op())
+ .representation()
+ .representation());
+ break;
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
case IrOpcode::kProtectedLoad:
- case IrOpcode::kPoisonedLoad:
representation_vector_[node->id()] = PromoteRepresentation(
LoadRepresentationOf(node->op()).representation());
break;
@@ -154,8 +158,8 @@ class MachineRepresentationInferrer {
}
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord64AtomicStore:
- representation_vector_[node->id()] =
- PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
+ representation_vector_[node->id()] = PromoteRepresentation(
+ AtomicStoreParametersOf(node->op()).representation());
break;
case IrOpcode::kWord32AtomicPairLoad:
case IrOpcode::kWord32AtomicPairStore:
@@ -206,15 +210,8 @@ class MachineRepresentationInferrer {
case IrOpcode::kChangeInt32ToTagged:
case IrOpcode::kChangeUint32ToTagged:
case IrOpcode::kBitcastWordToTagged:
- case IrOpcode::kTaggedPoisonOnSpeculation:
representation_vector_[node->id()] = MachineRepresentation::kTagged;
break;
- case IrOpcode::kWord32PoisonOnSpeculation:
- representation_vector_[node->id()] = MachineRepresentation::kWord32;
- break;
- case IrOpcode::kWord64PoisonOnSpeculation:
- representation_vector_[node->id()] = MachineRepresentation::kWord64;
- break;
case IrOpcode::kCompressedHeapConstant:
representation_vector_[node->id()] =
MachineRepresentation::kCompressedPointer;
@@ -394,14 +391,6 @@ class MachineRepresentationChecker {
CheckValueInputRepresentationIs(
node, 0, MachineType::PointerRepresentation());
break;
- case IrOpcode::kWord32PoisonOnSpeculation:
- CheckValueInputRepresentationIs(node, 0,
- MachineRepresentation::kWord32);
- break;
- case IrOpcode::kWord64PoisonOnSpeculation:
- CheckValueInputRepresentationIs(node, 0,
- MachineRepresentation::kWord64);
- break;
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
if (COMPRESS_POINTERS_BOOL) {
@@ -410,9 +399,6 @@ class MachineRepresentationChecker {
CheckValueInputIsTagged(node, 0);
}
break;
- case IrOpcode::kTaggedPoisonOnSpeculation:
- CheckValueInputIsTagged(node, 0);
- break;
case IrOpcode::kTruncateFloat64ToWord32:
case IrOpcode::kTruncateFloat64ToUint32:
case IrOpcode::kTruncateFloat64ToFloat32:
@@ -566,7 +552,6 @@ class MachineRepresentationChecker {
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord32AtomicPairLoad:
case IrOpcode::kWord64AtomicLoad:
- case IrOpcode::kPoisonedLoad:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
@@ -605,9 +590,12 @@ class MachineRepresentationChecker {
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
if (COMPRESS_POINTERS_BOOL &&
- node->opcode() == IrOpcode::kStore &&
- IsAnyTagged(
- StoreRepresentationOf(node->op()).representation())) {
+ ((node->opcode() == IrOpcode::kStore &&
+ IsAnyTagged(StoreRepresentationOf(node->op())
+ .representation())) ||
+ (node->opcode() == IrOpcode::kWord32AtomicStore &&
+ IsAnyTagged(AtomicStoreParametersOf(node->op())
+ .representation())))) {
CheckValueInputIsCompressedOrTagged(node, 2);
} else {
CheckValueInputIsTagged(node, 2);
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 33d58c854b..775e5ada81 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -947,6 +947,20 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
return ReduceWord64Comparisons(node);
}
+ case IrOpcode::kFloat32Select:
+ case IrOpcode::kFloat64Select:
+ case IrOpcode::kWord32Select:
+ case IrOpcode::kWord64Select: {
+ Int32Matcher match(node->InputAt(0));
+ if (match.HasResolvedValue()) {
+ if (match.Is(0)) {
+ return Replace(node->InputAt(2));
+ } else {
+ return Replace(node->InputAt(1));
+ }
+ }
+ break;
+ }
default:
break;
}
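
The new Select cases fold a select whose condition is already a compile-time constant: a zero condition picks the third input (the value for false), any other constant picks the second (the value for true). A sketch of the rule, using the operand order visible in the code above; the helper is illustrative, not part of the V8 API.

// select->InputAt(0) is the condition, InputAt(1) the value if true,
// InputAt(2) the value if false.
Node* FoldConstantSelect(Node* select, int32_t condition_value) {
  return condition_value == 0 ? select->InputAt(2) : select->InputAt(1);
}

// Example: Word32Select(Int32Constant(0), a, b) reduces to b, while
//          Word32Select(Int32Constant(7), a, b) reduces to a.
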
@@ -2061,7 +2075,6 @@ bool IsFloat64RepresentableAsFloat32(const Float64Matcher& m) {
} // namespace
-
Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
DCHECK(IrOpcode::kFloat64Equal == node->opcode() ||
IrOpcode::kFloat64LessThan == node->opcode() ||
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 411c6d4cb3..d24030e1a7 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -32,6 +32,41 @@ std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
return os << rep.representation() << ", " << rep.write_barrier_kind();
}
+bool operator==(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {
+ return lhs.store_representation() == rhs.store_representation() &&
+ lhs.order() == rhs.order();
+}
+
+bool operator!=(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(AtomicStoreParameters params) {
+ return base::hash_combine(hash_value(params.store_representation()),
+ params.order());
+}
+
+std::ostream& operator<<(std::ostream& os, AtomicStoreParameters params) {
+ return os << params.store_representation() << ", " << params.order();
+}
+
+bool operator==(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
+ return lhs.representation() == rhs.representation() &&
+ lhs.order() == rhs.order();
+}
+
+bool operator!=(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(AtomicLoadParameters params) {
+ return base::hash_combine(params.representation(), params.order());
+}
+
+std::ostream& operator<<(std::ostream& os, AtomicLoadParameters params) {
+ return os << params.representation() << ", " << params.order();
+}
+
size_t hash_value(MemoryAccessKind kind) { return static_cast<size_t>(kind); }
std::ostream& operator<<(std::ostream& os, MemoryAccessKind kind) {
@@ -121,21 +156,29 @@ bool operator==(LoadLaneParameters lhs, LoadLaneParameters rhs) {
LoadRepresentation LoadRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kLoad == op->opcode() ||
IrOpcode::kProtectedLoad == op->opcode() ||
- IrOpcode::kWord32AtomicLoad == op->opcode() ||
- IrOpcode::kWord64AtomicLoad == op->opcode() ||
- IrOpcode::kWord32AtomicPairLoad == op->opcode() ||
- IrOpcode::kPoisonedLoad == op->opcode() ||
IrOpcode::kUnalignedLoad == op->opcode() ||
IrOpcode::kLoadImmutable == op->opcode());
return OpParameter<LoadRepresentation>(op);
}
+AtomicLoadParameters AtomicLoadParametersOf(Operator const* op) {
+ DCHECK(IrOpcode::kWord32AtomicLoad == op->opcode() ||
+ IrOpcode::kWord64AtomicLoad == op->opcode());
+ return OpParameter<AtomicLoadParameters>(op);
+}
+
StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kStore == op->opcode() ||
IrOpcode::kProtectedStore == op->opcode());
return OpParameter<StoreRepresentation>(op);
}
+AtomicStoreParameters const& AtomicStoreParametersOf(Operator const* op) {
+ DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
+ IrOpcode::kWord64AtomicStore == op->opcode());
+ return OpParameter<AtomicStoreParameters>(op);
+}
+
UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
Operator const* op) {
DCHECK_EQ(IrOpcode::kUnalignedStore, op->opcode());
@@ -182,12 +225,6 @@ StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
return OpParameter<StackSlotRepresentation>(op);
}
-MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
- DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
- IrOpcode::kWord64AtomicStore == op->opcode());
- return OpParameter<MachineRepresentation>(op);
-}
-
MachineType AtomicOpType(Operator const* op) {
return OpParameter<MachineType>(op);
}
@@ -650,6 +687,30 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(S128Load32Zero) \
V(S128Load64Zero)
+#if TAGGED_SIZE_8_BYTES
+
+#define ATOMIC_TAGGED_TYPE_LIST(V)
+
+#define ATOMIC64_TAGGED_TYPE_LIST(V) \
+ V(TaggedSigned) \
+ V(TaggedPointer) \
+ V(AnyTagged) \
+ V(CompressedPointer) \
+ V(AnyCompressed)
+
+#else
+
+#define ATOMIC_TAGGED_TYPE_LIST(V) \
+ V(TaggedSigned) \
+ V(TaggedPointer) \
+ V(AnyTagged) \
+ V(CompressedPointer) \
+ V(AnyCompressed)
+
+#define ATOMIC64_TAGGED_TYPE_LIST(V)
+
+#endif // TAGGED_SIZE_8_BYTES
+
#define ATOMIC_U32_TYPE_LIST(V) \
V(Uint8) \
V(Uint16) \
@@ -665,6 +726,28 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
ATOMIC_U32_TYPE_LIST(V) \
V(Uint64)
+#if TAGGED_SIZE_8_BYTES
+
+#define ATOMIC_TAGGED_REPRESENTATION_LIST(V)
+
+#define ATOMIC64_TAGGED_REPRESENTATION_LIST(V) \
+ V(kTaggedSigned) \
+ V(kTaggedPointer) \
+ V(kTagged)
+
+#else
+
+#define ATOMIC_TAGGED_REPRESENTATION_LIST(V) \
+ V(kTaggedSigned) \
+ V(kTaggedPointer) \
+ V(kTagged) \
+ V(kCompressedPointer) \
+ V(kCompressed)
+
+#define ATOMIC64_TAGGED_REPRESENTATION_LIST(V)
+
+#endif // TAGGED_SIZE_8_BYTES
+
#define ATOMIC_REPRESENTATION_LIST(V) \
V(kWord8) \
V(kWord16) \
@@ -831,13 +914,6 @@ struct MachineOperatorGlobalCache {
Operator::kEliminatable, "Load", 2, 1, \
1, 1, 1, 0, MachineType::Type()) {} \
}; \
- struct PoisonedLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- PoisonedLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kPoisonedLoad, Operator::kEliminatable, \
- "PoisonedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
struct UnalignedLoad##Type##Operator final \
: public Operator1<LoadRepresentation> { \
UnalignedLoad##Type##Operator() \
@@ -861,7 +937,6 @@ struct MachineOperatorGlobalCache {
0, 0, 1, 0, 0, MachineType::Type()) {} \
}; \
Load##Type##Operator kLoad##Type; \
- PoisonedLoad##Type##Operator kPoisonedLoad##Type; \
UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
ProtectedLoad##Type##Operator kProtectedLoad##Type; \
LoadImmutable##Type##Operator kLoadImmutable##Type;
@@ -976,55 +1051,63 @@ struct MachineOperatorGlobalCache {
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
-#define ATOMIC_LOAD(Type) \
- struct Word32AtomicLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- Word32AtomicLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
- "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Word32AtomicLoad##Type##Operator kWord32AtomicLoad##Type;
+#define ATOMIC_LOAD(Type) \
+ struct Word32SeqCstLoad##Type##Operator \
+ : public Operator1<AtomicLoadParameters> { \
+ Word32SeqCstLoad##Type##Operator() \
+ : Operator1<AtomicLoadParameters>( \
+ IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
+ "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, \
+ AtomicLoadParameters(MachineType::Type(), \
+ AtomicMemoryOrder::kSeqCst)) {} \
+ }; \
+ Word32SeqCstLoad##Type##Operator kWord32SeqCstLoad##Type;
ATOMIC_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
-#define ATOMIC_LOAD(Type) \
- struct Word64AtomicLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- Word64AtomicLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
- "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
+#define ATOMIC_LOAD(Type) \
+ struct Word64SeqCstLoad##Type##Operator \
+ : public Operator1<AtomicLoadParameters> { \
+ Word64SeqCstLoad##Type##Operator() \
+ : Operator1<AtomicLoadParameters>( \
+ IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
+ "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, \
+ AtomicLoadParameters(MachineType::Type(), \
+ AtomicMemoryOrder::kSeqCst)) {} \
+ }; \
+ Word64SeqCstLoad##Type##Operator kWord64SeqCstLoad##Type;
ATOMIC_U64_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
#define ATOMIC_STORE(Type) \
- struct Word32AtomicStore##Type##Operator \
- : public Operator1<MachineRepresentation> { \
- Word32AtomicStore##Type##Operator() \
- : Operator1<MachineRepresentation>( \
+ struct Word32SeqCstStore##Type##Operator \
+ : public Operator1<AtomicStoreParameters> { \
+ Word32SeqCstStore##Type##Operator() \
+ : Operator1<AtomicStoreParameters>( \
IrOpcode::kWord32AtomicStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Word32AtomicStore", 3, 1, 1, 0, 1, 0, \
- MachineRepresentation::Type) {} \
+ AtomicStoreParameters(MachineRepresentation::Type, \
+ kNoWriteBarrier, \
+ AtomicMemoryOrder::kSeqCst)) {} \
}; \
- Word32AtomicStore##Type##Operator kWord32AtomicStore##Type;
+ Word32SeqCstStore##Type##Operator kWord32SeqCstStore##Type;
ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
#undef ATOMIC_STORE
#define ATOMIC_STORE(Type) \
- struct Word64AtomicStore##Type##Operator \
- : public Operator1<MachineRepresentation> { \
- Word64AtomicStore##Type##Operator() \
- : Operator1<MachineRepresentation>( \
+ struct Word64SeqCstStore##Type##Operator \
+ : public Operator1<AtomicStoreParameters> { \
+ Word64SeqCstStore##Type##Operator() \
+ : Operator1<AtomicStoreParameters>( \
IrOpcode::kWord64AtomicStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Word64AtomicStore", 3, 1, 1, 0, 1, 0, \
- MachineRepresentation::Type) {} \
+ AtomicStoreParameters(MachineRepresentation::Type, \
+ kNoWriteBarrier, \
+ AtomicMemoryOrder::kSeqCst)) {} \
}; \
- Word64AtomicStore##Type##Operator kWord64AtomicStore##Type;
+ Word64SeqCstStore##Type##Operator kWord64SeqCstStore##Type;
ATOMIC64_REPRESENTATION_LIST(ATOMIC_STORE)
#undef ATOMIC_STORE
@@ -1084,21 +1167,23 @@ struct MachineOperatorGlobalCache {
ATOMIC_U64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
- struct Word32AtomicPairLoadOperator : public Operator {
- Word32AtomicPairLoadOperator()
- : Operator(IrOpcode::kWord32AtomicPairLoad,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0) {}
+ struct Word32SeqCstPairLoadOperator : public Operator1<AtomicMemoryOrder> {
+ Word32SeqCstPairLoadOperator()
+ : Operator1<AtomicMemoryOrder>(IrOpcode::kWord32AtomicPairLoad,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0,
+ AtomicMemoryOrder::kSeqCst) {}
};
- Word32AtomicPairLoadOperator kWord32AtomicPairLoad;
-
- struct Word32AtomicPairStoreOperator : public Operator {
- Word32AtomicPairStoreOperator()
- : Operator(IrOpcode::kWord32AtomicPairStore,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0) {}
+ Word32SeqCstPairLoadOperator kWord32SeqCstPairLoad;
+
+ struct Word32SeqCstPairStoreOperator : public Operator1<AtomicMemoryOrder> {
+ Word32SeqCstPairStoreOperator()
+ : Operator1<AtomicMemoryOrder>(IrOpcode::kWord32AtomicPairStore,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairStore", 4, 1, 1, 0, 1,
+ 0, AtomicMemoryOrder::kSeqCst) {}
};
- Word32AtomicPairStoreOperator kWord32AtomicPairStore;
+ Word32SeqCstPairStoreOperator kWord32SeqCstPairStore;
#define ATOMIC_PAIR_OP(op) \
struct Word32AtomicPair##op##Operator : public Operator { \
@@ -1157,30 +1242,6 @@ struct MachineOperatorGlobalCache {
};
BitcastMaybeObjectToWordOperator kBitcastMaybeObjectToWord;
- struct TaggedPoisonOnSpeculation : public Operator {
- TaggedPoisonOnSpeculation()
- : Operator(IrOpcode::kTaggedPoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "TaggedPoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
- };
- TaggedPoisonOnSpeculation kTaggedPoisonOnSpeculation;
-
- struct Word32PoisonOnSpeculation : public Operator {
- Word32PoisonOnSpeculation()
- : Operator(IrOpcode::kWord32PoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "Word32PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
- };
- Word32PoisonOnSpeculation kWord32PoisonOnSpeculation;
-
- struct Word64PoisonOnSpeculation : public Operator {
- Word64PoisonOnSpeculation()
- : Operator(IrOpcode::kWord64PoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "Word64PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
- };
- Word64PoisonOnSpeculation kWord64PoisonOnSpeculation;
-
struct AbortCSAAssertOperator : public Operator {
AbortCSAAssertOperator()
: Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
@@ -1366,16 +1427,6 @@ const Operator* MachineOperatorBuilder::LoadImmutable(LoadRepresentation rep) {
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kPoisonedLoad##Type; \
- }
- MACHINE_TYPE_LIST(LOAD)
-#undef LOAD
- UNREACHABLE();
-}
-
const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
@@ -1592,23 +1643,47 @@ const Operator* MachineOperatorBuilder::MemBarrier() {
}
const Operator* MachineOperatorBuilder::Word32AtomicLoad(
- LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kWord32AtomicLoad##Type; \
+ AtomicLoadParameters params) {
+#define CACHED_LOAD(Type) \
+ if (params.representation() == MachineType::Type() && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord32SeqCstLoad##Type; \
+ }
+ ATOMIC_TYPE_LIST(CACHED_LOAD)
+#undef CACHED_LOAD
+
+#define LOAD(Type) \
+ if (params.representation() == MachineType::Type()) { \
+ return zone_->New<Operator1<AtomicLoadParameters>>( \
+ IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
+ "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, params); \
}
ATOMIC_TYPE_LIST(LOAD)
+ ATOMIC_TAGGED_TYPE_LIST(LOAD)
#undef LOAD
+
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word32AtomicStore(
- MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return &cache_.kWord32AtomicStore##kRep; \
+ AtomicStoreParameters params) {
+#define CACHED_STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord32SeqCstStore##kRep; \
+ }
+ ATOMIC_REPRESENTATION_LIST(CACHED_STORE)
+#undef CACHED_STORE
+
+#define STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep) { \
+ return zone_->New<Operator1<AtomicStoreParameters>>( \
+ IrOpcode::kWord32AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word32AtomicStore", 3, 1, 1, 0, 1, 0, params); \
}
ATOMIC_REPRESENTATION_LIST(STORE)
+ ATOMIC_TAGGED_REPRESENTATION_LIST(STORE)
#undef STORE
UNREACHABLE();
}
@@ -1685,24 +1760,49 @@ const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicLoad(
- LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kWord64AtomicLoad##Type; \
+ AtomicLoadParameters params) {
+#define CACHED_LOAD(Type) \
+ if (params.representation() == MachineType::Type() && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord64SeqCstLoad##Type; \
+ }
+ ATOMIC_U64_TYPE_LIST(CACHED_LOAD)
+#undef CACHED_LOAD
+
+#define LOAD(Type) \
+ if (params.representation() == MachineType::Type()) { \
+ return zone_->New<Operator1<AtomicLoadParameters>>( \
+ IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
+ "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, params); \
}
ATOMIC_U64_TYPE_LIST(LOAD)
+ ATOMIC64_TAGGED_TYPE_LIST(LOAD)
#undef LOAD
+
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word64AtomicStore(
- MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return &cache_.kWord64AtomicStore##kRep; \
+ AtomicStoreParameters params) {
+#define CACHED_STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord64SeqCstStore##kRep; \
+ }
+ ATOMIC64_REPRESENTATION_LIST(CACHED_STORE)
+#undef CACHED_STORE
+
+#define STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep) { \
+ return zone_->New<Operator1<AtomicStoreParameters>>( \
+ IrOpcode::kWord64AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word64AtomicStore", 3, 1, 1, 0, 1, 0, params); \
}
ATOMIC64_REPRESENTATION_LIST(STORE)
+ ATOMIC64_TAGGED_REPRESENTATION_LIST(STORE)
#undef STORE
+
UNREACHABLE();
}
@@ -1777,12 +1877,24 @@ const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicPairLoad() {
- return &cache_.kWord32AtomicPairLoad;
+const Operator* MachineOperatorBuilder::Word32AtomicPairLoad(
+ AtomicMemoryOrder order) {
+ if (order == AtomicMemoryOrder::kSeqCst) {
+ return &cache_.kWord32SeqCstPairLoad;
+ }
+ return zone_->New<Operator1<AtomicMemoryOrder>>(
+ IrOpcode::kWord32AtomicPairLoad, Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0, order);
}
-const Operator* MachineOperatorBuilder::Word32AtomicPairStore() {
- return &cache_.kWord32AtomicPairStore;
+const Operator* MachineOperatorBuilder::Word32AtomicPairStore(
+ AtomicMemoryOrder order) {
+ if (order == AtomicMemoryOrder::kSeqCst) {
+ return &cache_.kWord32SeqCstPairStore;
+ }
+ return zone_->New<Operator1<AtomicMemoryOrder>>(
+ IrOpcode::kWord32AtomicPairStore, Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0, order);
}
const Operator* MachineOperatorBuilder::Word32AtomicPairAdd() {
@@ -1813,18 +1925,6 @@ const Operator* MachineOperatorBuilder::Word32AtomicPairCompareExchange() {
return &cache_.kWord32AtomicPairCompareExchange;
}
-const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
- return &cache_.kTaggedPoisonOnSpeculation;
-}
-
-const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() {
- return &cache_.kWord32PoisonOnSpeculation;
-}
-
-const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
- return &cache_.kWord64PoisonOnSpeculation;
-}
-
#define EXTRACT_LANE_OP(Type, Sign, lane_count) \
const Operator* MachineOperatorBuilder::Type##ExtractLane##Sign( \
int32_t lane_index) { \
@@ -1918,8 +2018,12 @@ StackCheckKind StackCheckKindOf(Operator const* op) {
#undef ATOMIC_TYPE_LIST
#undef ATOMIC_U64_TYPE_LIST
#undef ATOMIC_U32_TYPE_LIST
+#undef ATOMIC_TAGGED_TYPE_LIST
+#undef ATOMIC64_TAGGED_TYPE_LIST
#undef ATOMIC_REPRESENTATION_LIST
+#undef ATOMIC_TAGGED_REPRESENTATION_LIST
#undef ATOMIC64_REPRESENTATION_LIST
+#undef ATOMIC64_TAGGED_REPRESENTATION_LIST
#undef SIMD_LANE_OP_LIST
#undef STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST
#undef LOAD_TRANSFORM_LIST
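
Taken together, these machine-operator.cc changes mean the atomic load and store builders are now parameterized by a memory order (plus a write-barrier kind for stores) instead of a bare representation; sequentially consistent requests are served from the cached operators, any other order gets a zone-allocated operator. A hedged usage sketch based on the constructors and builder signatures in this diff, where m stands for some MachineOperatorBuilder instance and the call sites are illustrative rather than verbatim V8 code:

// Sequentially consistent 32-bit atomic load and store operators.
const Operator* load_op = m.Word32AtomicLoad(
    AtomicLoadParameters(MachineType::Int32(), AtomicMemoryOrder::kSeqCst));
const Operator* store_op = m.Word32AtomicStore(AtomicStoreParameters(
    MachineRepresentation::kWord32, kNoWriteBarrier,
    AtomicMemoryOrder::kSeqCst));

// Reading the parameters back off an operator, as the machine graph verifier
// now does: AtomicLoadParametersOf(op).representation() yields a MachineType,
// AtomicStoreParametersOf(op).representation() a MachineRepresentation.
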
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 0ee3649ad0..7bd73663ab 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -8,6 +8,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/enum-set.h"
#include "src/base/flags.h"
+#include "src/codegen/atomic-memory-order.h"
#include "src/codegen/machine-type.h"
#include "src/compiler/globals.h"
#include "src/compiler/write-barrier-kind.h"
@@ -50,6 +51,32 @@ using LoadRepresentation = MachineType;
V8_EXPORT_PRIVATE LoadRepresentation LoadRepresentationOf(Operator const*)
V8_WARN_UNUSED_RESULT;
+// A Word(32|64)AtomicLoad needs both a LoadRepresentation and a memory
+// order.
+class AtomicLoadParameters final {
+ public:
+ AtomicLoadParameters(LoadRepresentation representation,
+ AtomicMemoryOrder order)
+ : representation_(representation), order_(order) {}
+
+ LoadRepresentation representation() const { return representation_; }
+ AtomicMemoryOrder order() const { return order_; }
+
+ private:
+ LoadRepresentation representation_;
+ AtomicMemoryOrder order_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(AtomicLoadParameters, AtomicLoadParameters);
+bool operator!=(AtomicLoadParameters, AtomicLoadParameters);
+
+size_t hash_value(AtomicLoadParameters);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AtomicLoadParameters);
+
+V8_EXPORT_PRIVATE AtomicLoadParameters AtomicLoadParametersOf(Operator const*)
+ V8_WARN_UNUSED_RESULT;
+
enum class MemoryAccessKind {
kNormal,
kUnaligned,
@@ -131,6 +158,43 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, StoreRepresentation);
V8_EXPORT_PRIVATE StoreRepresentation const& StoreRepresentationOf(
Operator const*) V8_WARN_UNUSED_RESULT;
+// A Word(32|64)AtomicStore needs both a StoreRepresentation and a memory order.
+class AtomicStoreParameters final {
+ public:
+ AtomicStoreParameters(MachineRepresentation representation,
+ WriteBarrierKind write_barrier_kind,
+ AtomicMemoryOrder order)
+ : store_representation_(representation, write_barrier_kind),
+ order_(order) {}
+
+ MachineRepresentation representation() const {
+ return store_representation_.representation();
+ }
+ WriteBarrierKind write_barrier_kind() const {
+ return store_representation_.write_barrier_kind();
+ }
+ AtomicMemoryOrder order() const { return order_; }
+
+ StoreRepresentation store_representation() const {
+ return store_representation_;
+ }
+
+ private:
+ StoreRepresentation store_representation_;
+ AtomicMemoryOrder order_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(AtomicStoreParameters, AtomicStoreParameters);
+bool operator!=(AtomicStoreParameters, AtomicStoreParameters);
+
+size_t hash_value(AtomicStoreParameters);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ AtomicStoreParameters);
+
+V8_EXPORT_PRIVATE AtomicStoreParameters const& AtomicStoreParametersOf(
+ Operator const*) V8_WARN_UNUSED_RESULT;
+
// An UnalignedStore needs a MachineType.
using UnalignedStoreRepresentation = MachineRepresentation;
@@ -173,9 +237,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
V8_EXPORT_PRIVATE StackSlotRepresentation const& StackSlotRepresentationOf(
Operator const* op) V8_WARN_UNUSED_RESULT;
-MachineRepresentation AtomicStoreRepresentationOf(Operator const* op)
- V8_WARN_UNUSED_RESULT;
-
MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT;
class S128ImmediateParameter {
@@ -852,7 +913,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// load [base + index]
const Operator* Load(LoadRepresentation rep);
const Operator* LoadImmutable(LoadRepresentation rep);
- const Operator* PoisonedLoad(LoadRepresentation rep);
const Operator* ProtectedLoad(LoadRepresentation rep);
const Operator* LoadTransform(MemoryAccessKind kind,
@@ -879,11 +939,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* StackSlot(int size, int alignment = 0);
const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
- // Destroy value by masking when misspeculating.
- const Operator* TaggedPoisonOnSpeculation();
- const Operator* Word32PoisonOnSpeculation();
- const Operator* Word64PoisonOnSpeculation();
-
// Access to the machine stack.
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
@@ -901,13 +956,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* MemBarrier();
// atomic-load [base + index]
- const Operator* Word32AtomicLoad(LoadRepresentation rep);
+ const Operator* Word32AtomicLoad(AtomicLoadParameters params);
// atomic-load [base + index]
- const Operator* Word64AtomicLoad(LoadRepresentation rep);
+ const Operator* Word64AtomicLoad(AtomicLoadParameters params);
// atomic-store [base + index], value
- const Operator* Word32AtomicStore(MachineRepresentation rep);
+ const Operator* Word32AtomicStore(AtomicStoreParameters params);
// atomic-store [base + index], value
- const Operator* Word64AtomicStore(MachineRepresentation rep);
+ const Operator* Word64AtomicStore(AtomicStoreParameters params);
// atomic-exchange [base + index], value
const Operator* Word32AtomicExchange(MachineType type);
// atomic-exchange [base + index], value
@@ -937,9 +992,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-xor [base + index], value
const Operator* Word64AtomicXor(MachineType type);
// atomic-pair-load [base + index]
- const Operator* Word32AtomicPairLoad();
+ const Operator* Word32AtomicPairLoad(AtomicMemoryOrder order);
// atomic-pair-store [base + index], value_high, value_low
- const Operator* Word32AtomicPairStore();
+ const Operator* Word32AtomicPairStore(AtomicMemoryOrder order);
// atomic-pair-add [base + index], value_high, value_low
const Operator* Word32AtomicPairAdd();
// atomic-pair-sub [base + index], value_high, value_low
@@ -980,7 +1035,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
V(Word, Ror) \
V(Word, Clz) \
V(Word, Equal) \
- V(Word, PoisonOnSpeculation) \
V(Int, Add) \
V(Int, Sub) \
V(Int, Mul) \
diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc
index 9673a51844..27ad71c07a 100644
--- a/deps/v8/src/compiler/memory-lowering.cc
+++ b/deps/v8/src/compiler/memory-lowering.cc
@@ -60,7 +60,6 @@ class MemoryLowering::AllocationGroup final : public ZoneObject {
MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
JSGraphAssembler* graph_assembler,
- PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
WriteBarrierAssertFailedCallback callback,
const char* function_debug_name)
@@ -71,7 +70,6 @@ MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
machine_(jsgraph->machine()),
graph_assembler_(graph_assembler),
allocation_folding_(allocation_folding),
- poisoning_level_(poisoning_level),
write_barrier_assert_failed_(callback),
function_debug_name_(function_debug_name) {}
@@ -401,11 +399,7 @@ Reduction MemoryLowering::ReduceLoadElement(Node* node) {
node->ReplaceInput(1, ComputeIndex(access, index));
MachineType type = access.machine_type;
DCHECK(!type.IsMapWord());
- if (NeedsPoisoning(access.load_sensitivity)) {
- NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
- } else {
- NodeProperties::ChangeOp(node, machine()->Load(type));
- }
+ NodeProperties::ChangeOp(node, machine()->Load(type));
return Changed(node);
}
@@ -413,8 +407,7 @@ Node* MemoryLowering::DecodeExternalPointer(
Node* node, ExternalPointerTag external_pointer_tag) {
#ifdef V8_HEAP_SANDBOX
DCHECK(V8_HEAP_SANDBOX_BOOL);
- DCHECK(node->opcode() == IrOpcode::kLoad ||
- node->opcode() == IrOpcode::kPoisonedLoad);
+ DCHECK(node->opcode() == IrOpcode::kLoad);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
__ InitializeEffectControl(effect, control);
@@ -476,16 +469,11 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) {
}
if (type.IsMapWord()) {
- DCHECK(!NeedsPoisoning(access.load_sensitivity));
DCHECK(!access.type.Is(Type::SandboxedExternalPointer()));
return ReduceLoadMap(node);
}
- if (NeedsPoisoning(access.load_sensitivity)) {
- NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
- } else {
- NodeProperties::ChangeOp(node, machine()->Load(type));
- }
+ NodeProperties::ChangeOp(node, machine()->Load(type));
if (V8_HEAP_SANDBOX_BOOL &&
access.type.Is(Type::SandboxedExternalPointer())) {
@@ -655,21 +643,6 @@ WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind(
return write_barrier_kind;
}
-bool MemoryLowering::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
- // Safe loads do not need poisoning.
- if (load_sensitivity == LoadSensitivity::kSafe) return false;
-
- switch (poisoning_level_) {
- case PoisoningMitigationLevel::kDontPoison:
- return false;
- case PoisoningMitigationLevel::kPoisonAll:
- return true;
- case PoisoningMitigationLevel::kPoisonCriticalOnly:
- return load_sensitivity == LoadSensitivity::kCritical;
- }
- UNREACHABLE();
-}
-
MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Zone* zone)
diff --git a/deps/v8/src/compiler/memory-lowering.h b/deps/v8/src/compiler/memory-lowering.h
index 1fbe18abff..9edb880e6f 100644
--- a/deps/v8/src/compiler/memory-lowering.h
+++ b/deps/v8/src/compiler/memory-lowering.h
@@ -75,7 +75,6 @@ class MemoryLowering final : public Reducer {
MemoryLowering(
JSGraph* jsgraph, Zone* zone, JSGraphAssembler* graph_assembler,
- PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding =
AllocationFolding::kDontAllocationFolding,
WriteBarrierAssertFailedCallback callback = [](Node*, Node*, const char*,
@@ -112,7 +111,6 @@ class MemoryLowering final : public Reducer {
Node* DecodeExternalPointer(Node* encoded_pointer, ExternalPointerTag tag);
Reduction ReduceLoadMap(Node* encoded_pointer);
Node* ComputeIndex(ElementAccess const& access, Node* node);
- bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
void EnsureAllocateOperator();
Node* GetWasmInstanceNode();
@@ -133,7 +131,6 @@ class MemoryLowering final : public Reducer {
MachineOperatorBuilder* machine_;
JSGraphAssembler* graph_assembler_;
AllocationFolding allocation_folding_;
- PoisoningMitigationLevel poisoning_level_;
WriteBarrierAssertFailedCallback write_barrier_assert_failed_;
const char* function_debug_name_;
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 860ea1fae1..ba4a5c1f67 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -40,7 +40,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kLoadLane:
case IrOpcode::kLoadTransform:
case IrOpcode::kMemoryBarrier:
- case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kRetain:
@@ -54,7 +53,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kStoreField:
case IrOpcode::kStoreLane:
case IrOpcode::kStoreToObject:
- case IrOpcode::kTaggedPoisonOnSpeculation:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
case IrOpcode::kUnreachable:
@@ -77,7 +75,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord32AtomicSub:
case IrOpcode::kWord32AtomicXor:
- case IrOpcode::kWord32PoisonOnSpeculation:
case IrOpcode::kWord64AtomicAdd:
case IrOpcode::kWord64AtomicAnd:
case IrOpcode::kWord64AtomicCompareExchange:
@@ -87,7 +84,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kWord64AtomicStore:
case IrOpcode::kWord64AtomicSub:
case IrOpcode::kWord64AtomicXor:
- case IrOpcode::kWord64PoisonOnSpeculation:
return false;
case IrOpcode::kCall:
@@ -183,13 +179,12 @@ void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
} // namespace
MemoryOptimizer::MemoryOptimizer(
- JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level,
+ JSGraph* jsgraph, Zone* zone,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter)
: graph_assembler_(jsgraph, zone),
- memory_lowering_(jsgraph, zone, &graph_assembler_, poisoning_level,
- allocation_folding, WriteBarrierAssertFailed,
- function_debug_name),
+ memory_lowering_(jsgraph, zone, &graph_assembler_, allocation_folding,
+ WriteBarrierAssertFailed, function_debug_name),
jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index 3845304fdd..7d8bca44d4 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -30,7 +30,6 @@ using NodeId = uint32_t;
class MemoryOptimizer final {
public:
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
- PoisoningMitigationLevel poisoning_level,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter);
~MemoryOptimizer() = default;
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 1ce4023424..52dc476dc4 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -743,7 +743,6 @@ struct BaseWithIndexAndDisplacementMatcher {
switch (from->opcode()) {
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
- case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kInt32Add:
case IrOpcode::kInt64Add:
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 912bd7b5ce..b956f148cc 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -463,7 +463,6 @@
V(PlainPrimitiveToFloat64) \
V(PlainPrimitiveToNumber) \
V(PlainPrimitiveToWord32) \
- V(PoisonIndex) \
V(RestLength) \
V(RuntimeAbort) \
V(StoreDataViewElement) \
@@ -686,7 +685,6 @@
V(DebugBreak) \
V(Comment) \
V(Load) \
- V(PoisonedLoad) \
V(LoadImmutable) \
V(Store) \
V(StackSlot) \
@@ -746,9 +744,6 @@
V(Word64Select) \
V(Float32Select) \
V(Float64Select) \
- V(TaggedPoisonOnSpeculation) \
- V(Word32PoisonOnSpeculation) \
- V(Word64PoisonOnSpeculation) \
V(LoadStackCheckOffset) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index 82a6e6bb3e..16366bf588 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -10,21 +10,12 @@
#include "src/compiler/zone-stats.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
-#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
namespace compiler {
-namespace {
-
-// We log detailed phase information about the pipeline
-// in both the v8.turbofan and the v8.wasm.turbofan categories.
-constexpr const char kTraceCategory[] = // --
- TRACE_DISABLED_BY_DEFAULT("v8.turbofan") "," // --
- TRACE_DISABLED_BY_DEFAULT("v8.wasm.turbofan");
-
-} // namespace
+constexpr char PipelineStatistics::kTraceCategory[];
void PipelineStatistics::CommonStats::Begin(
PipelineStatistics* pipeline_stats) {
@@ -62,6 +53,7 @@ PipelineStatistics::PipelineStatistics(OptimizedCompilationInfo* info,
: outer_zone_(info->zone()),
zone_stats_(zone_stats),
compilation_stats_(compilation_stats),
+ code_kind_(info->code_kind()),
phase_kind_name_(nullptr),
phase_name_(nullptr) {
if (info->has_shared_info()) {
@@ -70,7 +62,6 @@ PipelineStatistics::PipelineStatistics(OptimizedCompilationInfo* info,
total_stats_.Begin(this);
}
-
PipelineStatistics::~PipelineStatistics() {
if (InPhaseKind()) EndPhaseKind();
CompilationStatistics::BasicStats diff;
@@ -82,7 +73,8 @@ PipelineStatistics::~PipelineStatistics() {
void PipelineStatistics::BeginPhaseKind(const char* phase_kind_name) {
DCHECK(!InPhase());
if (InPhaseKind()) EndPhaseKind();
- TRACE_EVENT_BEGIN0(kTraceCategory, phase_kind_name);
+ TRACE_EVENT_BEGIN1(kTraceCategory, phase_kind_name, "kind",
+ CodeKindToString(code_kind_));
phase_kind_name_ = phase_kind_name;
phase_kind_stats_.Begin(this);
}
@@ -92,11 +84,14 @@ void PipelineStatistics::EndPhaseKind() {
CompilationStatistics::BasicStats diff;
phase_kind_stats_.End(this, &diff);
compilation_stats_->RecordPhaseKindStats(phase_kind_name_, diff);
- TRACE_EVENT_END0(kTraceCategory, phase_kind_name_);
+ TRACE_EVENT_END2(kTraceCategory, phase_kind_name_, "kind",
+ CodeKindToString(code_kind_), "stats",
+ TRACE_STR_COPY(diff.AsJSON().c_str()));
}
void PipelineStatistics::BeginPhase(const char* phase_name) {
- TRACE_EVENT_BEGIN0(kTraceCategory, phase_name);
+ TRACE_EVENT_BEGIN1(kTraceCategory, phase_name, "kind",
+ CodeKindToString(code_kind_));
DCHECK(InPhaseKind());
phase_name_ = phase_name;
phase_stats_.Begin(this);
@@ -107,7 +102,9 @@ void PipelineStatistics::EndPhase() {
CompilationStatistics::BasicStats diff;
phase_stats_.End(this, &diff);
compilation_stats_->RecordPhaseStats(phase_kind_name_, phase_name_, diff);
- TRACE_EVENT_END0(kTraceCategory, phase_name_);
+ TRACE_EVENT_END2(kTraceCategory, phase_name_, "kind",
+ CodeKindToString(code_kind_), "stats",
+ TRACE_STR_COPY(diff.AsJSON().c_str()));
}
} // namespace compiler
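
The lone definition constexpr char PipelineStatistics::kTraceCategory[]; left in this file pairs with the static constexpr array that moves into pipeline-statistics.h in the next hunk: when such a member is ODR-used (for example, its address is passed to the tracing macros) and the code cannot rely on C++17 inline-variable semantics, it still needs exactly this out-of-line definition, with the initializer staying on the in-class declaration. A standalone illustration of the pattern, using a hypothetical TraceSource class rather than V8 code:

// header
struct TraceSource {
  static constexpr char kCategory[] = "disabled-by-default-example";
};

// exactly one .cc file: required under pre-C++17 rules when kCategory is
// ODR-used; under C++17 the member is implicitly inline and this line is
// redundant but still accepted.
constexpr char TraceSource::kCategory[];
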
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index 8a05d98011..19f7574e2a 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -11,6 +11,8 @@
#include "src/base/platform/elapsed-timer.h"
#include "src/compiler/zone-stats.h"
#include "src/diagnostics/compilation-statistics.h"
+#include "src/objects/code-kind.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -29,6 +31,12 @@ class PipelineStatistics : public Malloced {
void BeginPhaseKind(const char* phase_kind_name);
void EndPhaseKind();
+ // We log detailed phase information about the pipeline
+ // in both the v8.turbofan and the v8.wasm.turbofan categories.
+ static constexpr char kTraceCategory[] =
+ TRACE_DISABLED_BY_DEFAULT("v8.turbofan") "," // --
+ TRACE_DISABLED_BY_DEFAULT("v8.wasm.turbofan");
+
private:
size_t OuterZoneSize() {
return static_cast<size_t>(outer_zone_->allocation_size());
@@ -60,6 +68,7 @@ class PipelineStatistics : public Malloced {
Zone* outer_zone_;
ZoneStats* zone_stats_;
CompilationStatistics* compilation_stats_;
+ CodeKind code_kind_;
std::string function_name_;
// Stats for the entire compilation.
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index e802cd7268..8d3d93aa2a 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -84,6 +84,7 @@
#include "src/execution/isolate-inl.h"
#include "src/heap/local-heap.h"
#include "src/init/bootstrapper.h"
+#include "src/logging/code-events.h"
#include "src/logging/counters.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/shared-function-info.h"
@@ -95,6 +96,7 @@
#if V8_ENABLE_WEBASSEMBLY
#include "src/compiler/wasm-compiler.h"
+#include "src/compiler/wasm-inlining.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
@@ -547,8 +549,7 @@ class PipelineData {
code_generator_ = new CodeGenerator(
codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
osr_helper_, start_source_position_, jump_optimization_info_,
- info()->GetPoisoningMitigationLevel(), assembler_options(),
- info_->builtin(), max_unoptimized_frame_height(),
+ assembler_options(), info_->builtin(), max_unoptimized_frame_height(),
max_pushed_argument_count(),
FLAG_trace_turbo_stack_accesses ? debug_name_.get() : nullptr);
}
@@ -947,13 +948,10 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
void TraceScheduleAndVerify(OptimizedCompilationInfo* info, PipelineData* data,
Schedule* schedule, const char* phase_name) {
-#ifdef V8_RUNTIME_CALL_STATS
- PipelineRunScope scope(data, "V8.TraceScheduleAndVerify",
- RuntimeCallCounterId::kOptimizeTraceScheduleAndVerify,
- RuntimeCallStats::kThreadSpecific);
-#else
- PipelineRunScope scope(data, "V8.TraceScheduleAndVerify");
-#endif
+ RCS_SCOPE(data->runtime_call_stats(),
+ RuntimeCallCounterId::kOptimizeTraceScheduleAndVerify,
+ RuntimeCallStats::kThreadSpecific);
+ TRACE_EVENT0(PipelineStatistics::kTraceCategory, "V8.TraceScheduleAndVerify");
if (info->trace_turbo_json()) {
UnparkedScopeIfNeeded scope(data->broker());
AllowHandleDereference allow_deref;
@@ -1161,18 +1159,6 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (FLAG_turbo_inlining) {
compilation_info()->set_inlining();
}
-
- // This is the bottleneck for computing and setting poisoning level in the
- // optimizing compiler.
- PoisoningMitigationLevel load_poisoning =
- PoisoningMitigationLevel::kDontPoison;
- if (FLAG_untrusted_code_mitigations) {
- // For full mitigations, this can be changed to
- // PoisoningMitigationLevel::kPoisonAll.
- load_poisoning = PoisoningMitigationLevel::kPoisonCriticalOnly;
- }
- compilation_info()->SetPoisoningMitigationLevel(load_poisoning);
-
if (FLAG_turbo_allocation_folding) {
compilation_info()->set_allocation_folding();
}
@@ -1424,8 +1410,8 @@ struct InliningPhase {
};
#if V8_ENABLE_WEBASSEMBLY
-struct WasmInliningPhase {
- DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)
+struct JSWasmInliningPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(JSWasmInlining)
void Run(PipelineData* data, Zone* temp_zone) {
DCHECK(data->has_js_wasm_calls());
@@ -1629,10 +1615,10 @@ struct SimplifiedLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(SimplifiedLowering)
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
- SimplifiedLowering lowering(
- data->jsgraph(), data->broker(), temp_zone, data->source_positions(),
- data->node_origins(), data->info()->GetPoisoningMitigationLevel(),
- &data->info()->tick_counter(), linkage, data->observe_node_manager());
+ SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
+ data->source_positions(), data->node_origins(),
+ &data->info()->tick_counter(), linkage,
+ data->observe_node_manager());
// RepresentationChanger accesses the heap.
UnparkedScopeIfNeeded scope(data->broker());
@@ -1699,6 +1685,25 @@ struct WasmLoopUnrollingPhase {
}
}
};
+
+struct WasmInliningPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)
+
+ void Run(PipelineData* data, Zone* temp_zone, wasm::CompilationEnv* env,
+ const wasm::WireBytesStorage* wire_bytes) {
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
+ DeadCodeElimination dead(&graph_reducer, data->graph(),
+ data->mcgraph()->common(), temp_zone);
+ WasmInliner inliner(&graph_reducer, env, data->source_positions(),
+ data->node_origins(), data->mcgraph(), wire_bytes, 0);
+ AddReducer(data, &graph_reducer, &dead);
+ AddReducer(data, &graph_reducer, &inliner);
+
+ graph_reducer.ReduceGraph();
+ }
+};
#endif // V8_ENABLE_WEBASSEMBLY
struct LoopExitEliminationPhase {
@@ -1797,7 +1802,6 @@ struct EffectControlLinearizationPhase {
// - introduce effect phis and rewire effects to get SSA again.
LinearizeEffectControl(data->jsgraph(), schedule, temp_zone,
data->source_positions(), data->node_origins(),
- data->info()->GetPoisoningMitigationLevel(),
data->broker());
}
{
@@ -1899,7 +1903,7 @@ struct MemoryOptimizationPhase {
// Optimize allocations and load/store operations.
MemoryOptimizer optimizer(
- data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
+ data->jsgraph(), temp_zone,
data->info()->allocation_folding()
? MemoryLowering::AllocationFolding::kDoAllocationFolding
: MemoryLowering::AllocationFolding::kDontAllocationFolding,
@@ -1989,7 +1993,6 @@ struct ScheduledEffectControlLinearizationPhase {
// - lower simplified memory and select nodes to machine level nodes.
LowerToMachineSchedule(data->jsgraph(), data->schedule(), temp_zone,
data->source_positions(), data->node_origins(),
- data->info()->GetPoisoningMitigationLevel(),
data->broker());
// TODO(rmcilroy) Avoid having to rebuild rpo_order on schedule each time.
@@ -2205,7 +2208,6 @@ struct InstructionSelectionPhase {
data->assembler_options().enable_root_relative_access
? InstructionSelector::kEnableRootsRelativeAddressing
: InstructionSelector::kDisableRootsRelativeAddressing,
- data->info()->GetPoisoningMitigationLevel(),
data->info()->trace_turbo_json()
? InstructionSelector::kEnableTraceTurboJson
: InstructionSelector::kDisableTraceTurboJson);
@@ -2607,6 +2609,9 @@ CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
tracing_scope.stream(), isolate);
}
#endif
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::STUB_TAG,
+ Handle<AbstractCode>::cast(code),
+ compilation_info()->GetDebugName().get()));
return SUCCEEDED;
}
return FAILED;
@@ -2750,8 +2755,8 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
#if V8_ENABLE_WEBASSEMBLY
if (data->has_js_wasm_calls()) {
DCHECK(data->info()->inline_js_wasm_calls());
- Run<WasmInliningPhase>();
- RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ Run<JSWasmInliningPhase>();
+ RunPrintAndVerify(JSWasmInliningPhase::phase_name(), true);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -2853,8 +2858,8 @@ bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
#if V8_ENABLE_WEBASSEMBLY
if (data->has_js_wasm_calls()) {
DCHECK(data->info()->inline_js_wasm_calls());
- Run<WasmInliningPhase>();
- RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ Run<JSWasmInliningPhase>();
+ RunPrintAndVerify(JSWasmInliningPhase::phase_name(), true);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -2969,17 +2974,12 @@ int HashGraphForPGO(Graph* graph) {
MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
- const char* debug_name, Builtin builtin,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
+ const char* debug_name, Builtin builtin, const AssemblerOptions& options,
const ProfileDataFromFile* profile_data) {
OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(),
kind);
info.set_builtin(builtin);
- if (poisoning_level != PoisoningMitigationLevel::kDontPoison) {
- info.SetPoisoningMitigationLevel(poisoning_level);
- }
-
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
NodeOriginTable node_origins(graph);
@@ -3195,7 +3195,8 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
// static
void Pipeline::GenerateCodeForWasmFunction(
- OptimizedCompilationInfo* info, MachineGraph* mcgraph,
+ OptimizedCompilationInfo* info, wasm::CompilationEnv* env,
+ const wasm::WireBytesStorage* wire_bytes_storage, MachineGraph* mcgraph,
CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
NodeOriginTable* node_origins, wasm::FunctionBody function_body,
const wasm::WasmModule* module, int function_index,
@@ -3225,6 +3226,10 @@ void Pipeline::GenerateCodeForWasmFunction(
pipeline.Run<WasmLoopUnrollingPhase>(loop_info);
pipeline.RunPrintAndVerify(WasmLoopUnrollingPhase::phase_name(), true);
}
+ if (FLAG_wasm_inlining) {
+ pipeline.Run<WasmInliningPhase>(env, wire_bytes_storage);
+ pipeline.RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ }
const bool is_asm_js = is_asmjs_module(module);
if (FLAG_wasm_opt || is_asm_js) {
@@ -3546,18 +3551,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
AllocateRegistersForTopTier(config.get(), call_descriptor, run_verifier);
} else {
- const RegisterConfiguration* config;
- if (data->info()->GetPoisoningMitigationLevel() !=
- PoisoningMitigationLevel::kDontPoison) {
-#ifdef V8_TARGET_ARCH_IA32
- FATAL("Poisoning is not supported on ia32.");
-#else
- config = RegisterConfiguration::Poisoning();
-#endif // V8_TARGET_ARCH_IA32
- } else {
- config = RegisterConfiguration::Default();
- }
-
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
if (data->info()->IsTurboprop() && FLAG_turboprop_mid_tier_reg_alloc) {
AllocateRegistersForMidTier(config, call_descriptor, run_verifier);
} else {
@@ -3643,7 +3637,6 @@ std::ostream& operator<<(std::ostream& out,
out << "\"codeStartRegisterCheck\": "
<< s.offsets_info->code_start_register_check << ", ";
out << "\"deoptCheck\": " << s.offsets_info->deopt_check << ", ";
- out << "\"initPoison\": " << s.offsets_info->init_poison << ", ";
out << "\"blocksStart\": " << s.offsets_info->blocks_start << ", ";
out << "\"outOfLineCode\": " << s.offsets_info->out_of_line_code << ", ";
out << "\"deoptimizationExits\": " << s.offsets_info->deoptimization_exits
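
The new WasmInliningPhase above follows the usual TurboFan wiring: reducers (dead-code elimination and the WasmInliner) are registered with a GraphReducer, which applies them until no reducer changes the graph any further. The snippet below is a generic, self-contained sketch of that reducer-driver fixpoint pattern; Node, Reducer, and RunToFixpoint are illustrative stand-ins, not V8's classes.

// Generic sketch of the reducer-driver pattern: apply a set of reducers to
// every node until no reducer makes further changes (a fixed point).
#include <iostream>
#include <vector>

struct Node { int opcode; int value; };

class Reducer {
 public:
  virtual ~Reducer() = default;
  // Returns true if the node was changed.
  virtual bool Reduce(Node* node) = 0;
};

// Folds opcode 1 ("double") nodes into plain constants (opcode 0).
class ConstantFolder : public Reducer {
 public:
  bool Reduce(Node* node) override {
    if (node->opcode != 1) return false;
    node->opcode = 0;
    node->value *= 2;
    return true;
  }
};

// Clamps constants into a small range, possibly re-enabling other reducers.
class Clamper : public Reducer {
 public:
  bool Reduce(Node* node) override {
    if (node->opcode == 0 && node->value > 100) {
      node->value = 100;
      return true;
    }
    return false;
  }
};

void RunToFixpoint(std::vector<Node>& graph, std::vector<Reducer*>& reducers) {
  bool changed = true;
  while (changed) {  // keep going until a full pass changes nothing
    changed = false;
    for (Node& n : graph) {
      for (Reducer* r : reducers) changed |= r->Reduce(&n);
    }
  }
}

int main() {
  std::vector<Node> graph = {{1, 30}, {1, 70}, {0, 5}};
  ConstantFolder folder;
  Clamper clamper;
  std::vector<Reducer*> reducers = {&folder, &clamper};
  RunToFixpoint(graph, reducers);
  for (const Node& n : graph)
    std::cout << "opcode=" << n.opcode << " value=" << n.value << "\n";
  return 0;
}
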
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index ea67b31e06..19fd715885 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -23,11 +23,13 @@ class ProfileDataFromFile;
class RegisterConfiguration;
namespace wasm {
+struct CompilationEnv;
struct FunctionBody;
class NativeModule;
struct WasmCompilationResult;
class WasmEngine;
struct WasmModule;
+class WireBytesStorage;
} // namespace wasm
namespace compiler {
@@ -54,7 +56,8 @@ class Pipeline : public AllStatic {
// Run the pipeline for the WebAssembly compilation info.
static void GenerateCodeForWasmFunction(
- OptimizedCompilationInfo* info, MachineGraph* mcgraph,
+ OptimizedCompilationInfo* info, wasm::CompilationEnv* env,
+ const wasm::WireBytesStorage* wire_bytes_storage, MachineGraph* mcgraph,
CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
NodeOriginTable* node_origins, wasm::FunctionBody function_body,
const wasm::WasmModule* module, int function_index,
@@ -78,8 +81,7 @@ class Pipeline : public AllStatic {
static MaybeHandle<Code> GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
- const char* debug_name, Builtin builtin,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
+ const char* debug_name, Builtin builtin, const AssemblerOptions& options,
const ProfileDataFromFile* profile_data);
// ---------------------------------------------------------------------------
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index a64521d6f6..456512a867 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -168,7 +168,9 @@ base::Optional<Node*> PropertyAccessBuilder::FoldLoadDictPrototypeConstant(
Map::GetConstructorFunction(
*map_handle, *broker()->target_native_context().object())
.value();
- map = MakeRef(broker(), constructor.initial_map());
+ // {constructor.initial_map()} is loaded/stored with acquire-release
+ // semantics for constructors.
+ map = MakeRefAssumeMemoryFence(broker(), constructor.initial_map());
DCHECK(map.object()->IsJSObjectMap());
}
dependencies()->DependOnConstantInDictionaryPrototypeChain(
@@ -235,7 +237,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
Type::Any(),
MachineType::AnyTagged(),
kPointerWriteBarrier,
- LoadSensitivity::kCritical,
field_access.const_field_info};
storage = *effect = graph()->NewNode(
simplified()->LoadField(storage_access), storage, *effect, *control);
@@ -263,7 +264,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
Type::OtherInternal(),
MachineType::TaggedPointer(),
kPointerWriteBarrier,
- LoadSensitivity::kCritical,
field_access.const_field_info};
storage = *effect = graph()->NewNode(
simplified()->LoadField(storage_access), storage, *effect, *control);
@@ -291,7 +291,6 @@ Node* PropertyAccessBuilder::BuildMinimorphicLoadDataField(
access_info.field_type(),
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
- LoadSensitivity::kCritical,
ConstFieldInfo::None()};
return BuildLoadDataField(name, lookup_start_object, field_access,
access_info.is_inobject(), effect, control);
@@ -319,7 +318,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
access_info.field_type(),
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
- LoadSensitivity::kCritical,
access_info.GetConstFieldInfo()};
if (field_representation == MachineRepresentation::kTaggedPointer ||
field_representation == MachineRepresentation::kCompressedPointer) {
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 7ed217d4e3..383d63dd69 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -18,8 +18,7 @@ namespace compiler {
RawMachineAssembler::RawMachineAssembler(
Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
MachineRepresentation word, MachineOperatorBuilder::Flags flags,
- MachineOperatorBuilder::AlignmentRequirements alignment_requirements,
- PoisoningMitigationLevel poisoning_level)
+ MachineOperatorBuilder::AlignmentRequirements alignment_requirements)
: isolate_(isolate),
graph_(graph),
schedule_(zone()->New<Schedule>(zone())),
@@ -30,8 +29,7 @@ RawMachineAssembler::RawMachineAssembler(
call_descriptor_(call_descriptor),
target_parameter_(nullptr),
parameters_(parameter_count(), zone()),
- current_block_(schedule()->start()),
- poisoning_level_(poisoning_level) {
+ current_block_(schedule()->start()) {
int param_count = static_cast<int>(parameter_count());
// Add an extra input for the JSFunction parameter to the start node.
graph->SetStart(graph->NewNode(common_.Start(param_count + 1)));
@@ -472,7 +470,7 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) {
return;
case IrOpcode::kIfTrue: {
Node* branch = NodeProperties::GetControlInput(control_node);
- BranchHint hint = BranchOperatorInfoOf(branch->op()).hint;
+ BranchHint hint = BranchHintOf(branch->op());
if (hint == BranchHint::kTrue) {
// The other possibility is also deferred, so the responsible branch
// has to be before.
@@ -485,7 +483,7 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) {
}
case IrOpcode::kIfFalse: {
Node* branch = NodeProperties::GetControlInput(control_node);
- BranchHint hint = BranchOperatorInfoOf(branch->op()).hint;
+ BranchHint hint = BranchHintOf(branch->op());
if (hint == BranchHint::kFalse) {
// The other possibility is also deferred, so the responsible branch
// has to be before.
@@ -516,11 +514,10 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) {
}
}
- BranchOperatorInfo info = BranchOperatorInfoOf(responsible_branch->op());
- if (info.hint == new_branch_hint) return;
- NodeProperties::ChangeOp(
- responsible_branch,
- common()->Branch(new_branch_hint, info.is_safety_check));
+ BranchHint hint = BranchHintOf(responsible_branch->op());
+ if (hint == new_branch_hint) return;
+ NodeProperties::ChangeOp(responsible_branch,
+ common()->Branch(new_branch_hint));
}
Node* RawMachineAssembler::TargetParameter() {
@@ -544,9 +541,7 @@ void RawMachineAssembler::Goto(RawMachineLabel* label) {
void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
RawMachineLabel* false_val) {
DCHECK(current_block_ != schedule()->end());
- Node* branch = MakeNode(
- common()->Branch(BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck), 1,
- &condition);
+ Node* branch = MakeNode(common()->Branch(BranchHint::kNone), 1, &condition);
BasicBlock* true_block = schedule()->NewBasicBlock();
BasicBlock* false_block = schedule()->NewBasicBlock();
schedule()->AddBranch(CurrentBlock(), branch, true_block, false_block);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index a811fa7bf9..f0bb6e0425 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -52,9 +52,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
MachineOperatorBuilder::Flag::kNoFlags,
MachineOperatorBuilder::AlignmentRequirements alignment_requirements =
MachineOperatorBuilder::AlignmentRequirements::
- FullUnalignedAccessSupport(),
- PoisoningMitigationLevel poisoning_level =
- PoisoningMitigationLevel::kPoisonCriticalOnly);
+ FullUnalignedAccessSupport());
~RawMachineAssembler() = default;
RawMachineAssembler(const RawMachineAssembler&) = delete;
@@ -67,7 +65,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
CommonOperatorBuilder* common() { return &common_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
CallDescriptor* call_descriptor() const { return call_descriptor_; }
- PoisoningMitigationLevel poisoning_level() const { return poisoning_level_; }
// Only used for tests: Finalizes the schedule and exports it to be used for
// code generation. Note that this RawMachineAssembler becomes invalid after
@@ -132,19 +129,11 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
// Memory Operations.
- Node* Load(MachineType type, Node* base,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return Load(type, base, IntPtrConstant(0), needs_poisoning);
+ Node* Load(MachineType type, Node* base) {
+ return Load(type, base, IntPtrConstant(0));
}
- Node* Load(MachineType type, Node* base, Node* index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ Node* Load(MachineType type, Node* base, Node* index) {
const Operator* op = machine()->Load(type);
- CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level_);
- if (needs_poisoning == LoadSensitivity::kCritical &&
- poisoning_level_ == PoisoningMitigationLevel::kPoisonCriticalOnly) {
- op = machine()->PoisonedLoad(type);
- }
-
Node* load = AddNode(op, base, index);
return load;
}
@@ -174,10 +163,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
bool IsMapOffsetConstantMinusTag(int offset) {
return offset == HeapObject::kMapOffset - kHeapObjectTag;
}
- Node* LoadFromObject(
- MachineType type, Node* base, Node* offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- CHECK_EQ(needs_poisoning, LoadSensitivity::kSafe);
+ Node* LoadFromObject(MachineType type, Node* base, Node* offset) {
DCHECK_IMPLIES(V8_MAP_PACKING_BOOL && IsMapOffsetConstantMinusTag(offset),
type == MachineType::MapInHeader());
ObjectAccess access = {type, WriteBarrierKind::kNoWriteBarrier};
@@ -253,20 +239,20 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
// Atomic memory operations.
- Node* AtomicLoad(MachineType type, Node* base, Node* index) {
- DCHECK_NE(type.representation(), MachineRepresentation::kWord64);
- return AddNode(machine()->Word32AtomicLoad(type), base, index);
+ Node* AtomicLoad(AtomicLoadParameters rep, Node* base, Node* index) {
+ DCHECK_NE(rep.representation().representation(),
+ MachineRepresentation::kWord64);
+ return AddNode(machine()->Word32AtomicLoad(rep), base, index);
}
- Node* AtomicLoad64(Node* base, Node* index) {
+ Node* AtomicLoad64(AtomicLoadParameters rep, Node* base, Node* index) {
if (machine()->Is64()) {
// This uses Uint64() intentionally: AtomicLoad is not implemented for
// Int64(), which is fine because the machine instruction only cares
// about words.
- return AddNode(machine()->Word64AtomicLoad(MachineType::Uint64()), base,
- index);
+ return AddNode(machine()->Word64AtomicLoad(rep), base, index);
} else {
- return AddNode(machine()->Word32AtomicPairLoad(), base, index);
+ return AddNode(machine()->Word32AtomicPairLoad(rep.order()), base, index);
}
}
@@ -276,22 +262,24 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
#define VALUE_HALVES value, value_high
#endif
- Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+ Node* AtomicStore(AtomicStoreParameters params, Node* base, Node* index,
Node* value) {
DCHECK(!IsMapOffsetConstantMinusTag(index));
- DCHECK_NE(rep, MachineRepresentation::kWord64);
- return AddNode(machine()->Word32AtomicStore(rep), base, index, value);
+ DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
+ return AddNode(machine()->Word32AtomicStore(params), base, index, value);
}
- Node* AtomicStore64(Node* base, Node* index, Node* value, Node* value_high) {
+ Node* AtomicStore64(AtomicStoreParameters params, Node* base, Node* index,
+ Node* value, Node* value_high) {
if (machine()->Is64()) {
DCHECK_NULL(value_high);
- return AddNode(
- machine()->Word64AtomicStore(MachineRepresentation::kWord64), base,
- index, value);
+ return AddNode(machine()->Word64AtomicStore(params), base, index, value);
} else {
- return AddNode(machine()->Word32AtomicPairStore(), base, index,
- VALUE_HALVES);
+ DCHECK(params.representation() != MachineRepresentation::kTaggedPointer &&
+ params.representation() != MachineRepresentation::kTaggedSigned &&
+ params.representation() != MachineRepresentation::kTagged);
+ return AddNode(machine()->Word32AtomicPairStore(params.order()), base,
+ index, VALUE_HALVES);
}
}
@@ -959,20 +947,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
}
- Node* TaggedPoisonOnSpeculation(Node* value) {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- return AddNode(machine()->TaggedPoisonOnSpeculation(), value);
- }
- return value;
- }
-
- Node* WordPoisonOnSpeculation(Node* value) {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- return AddNode(machine()->WordPoisonOnSpeculation(), value);
- }
- return value;
- }
-
// Call a given call descriptor and the given arguments.
// The call target is passed as part of the {inputs} array.
Node* CallN(CallDescriptor* call_descriptor, int input_count,
@@ -1136,6 +1110,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
CommonOperatorBuilder* common);
Isolate* isolate_;
+
Graph* graph_;
Schedule* schedule_;
SourcePositionTable* source_positions_;
@@ -1146,7 +1121,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* target_parameter_;
NodeVector parameters_;
BasicBlock* current_block_;
- PoisoningMitigationLevel poisoning_level_;
};
class V8_EXPORT_PRIVATE RawMachineLabel final {
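
In the raw-machine-assembler changes above, the atomic load and store helpers now take parameter objects (AtomicLoadParameters, AtomicStoreParameters) that bundle the machine representation with a memory order, instead of a bare MachineType or MachineRepresentation. Below is a rough standalone analogue of that bundling, mapped onto std::atomic; AtomicLoadParams and AtomicOrder are illustrative types, not V8's.

// Sketch: bundle "what to load" (width) with "how to order it" in a single
// parameter object, the shape the new atomic parameter classes take.
#include <atomic>
#include <cstdint>
#include <iostream>

enum class AtomicOrder { kSeqCst, kAcqRel };

struct AtomicLoadParams {
  int bits;           // 32 or 64
  AtomicOrder order;  // requested memory order
};

std::memory_order ToStdOrder(AtomicOrder order, bool is_load) {
  if (order == AtomicOrder::kSeqCst) return std::memory_order_seq_cst;
  // Acquire-release splits into acquire for loads, release for stores.
  return is_load ? std::memory_order_acquire : std::memory_order_release;
}

uint64_t AtomicLoad(const AtomicLoadParams& params,
                    std::atomic<uint64_t>* cell) {
  std::memory_order order = ToStdOrder(params.order, /*is_load=*/true);
  if (params.bits == 32) {
    // Narrow to the low 32 bits, mirroring a Word32 atomic load.
    return static_cast<uint32_t>(cell->load(order));
  }
  return cell->load(order);
}

int main() {
  std::atomic<uint64_t> cell{0x1122334455667788ull};
  std::cout << std::hex
            << AtomicLoad({32, AtomicOrder::kSeqCst}, &cell) << "\n"
            << AtomicLoad({64, AtomicOrder::kAcqRel}, &cell) << "\n";
  return 0;
}
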
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 1c07a23dde..6416eed376 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -1735,11 +1735,9 @@ class RepresentationSelector {
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower<T>()) {
- if (lowering->poisoning_level_ ==
- PoisoningMitigationLevel::kDontPoison &&
- (index_type.IsNone() || length_type.IsNone() ||
- (index_type.Min() >= 0.0 &&
- index_type.Max() < length_type.Min()))) {
+ if (index_type.IsNone() || length_type.IsNone() ||
+ (index_type.Min() >= 0.0 &&
+ index_type.Max() < length_type.Min())) {
// The bounds check is redundant if we already know that
// the index is within the bounds of [0.0, length[.
// TODO(neis): Move this into TypedOptimization?
@@ -3181,11 +3179,6 @@ class RepresentationSelector {
}
case IrOpcode::kCheckBounds:
return VisitCheckBounds<T>(node, lowering);
- case IrOpcode::kPoisonIndex: {
- VisitUnop<T>(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- return;
- }
case IrOpcode::kCheckHeapObject: {
if (InputCannotBe(node, Type::SignedSmall())) {
VisitUnop<T>(node, UseInfo::AnyTagged(),
@@ -3835,7 +3828,7 @@ class RepresentationSelector {
case IrOpcode::kDateNow:
VisitInputs<T>(node);
- return SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
+ return SetOutput<T>(node, MachineRepresentation::kTagged);
case IrOpcode::kFrameState:
return VisitFrameState<T>(FrameState{node});
case IrOpcode::kStateValues:
@@ -4225,18 +4218,19 @@ void RepresentationSelector::InsertUnreachableIfNecessary<LOWER>(Node* node) {
}
}
-SimplifiedLowering::SimplifiedLowering(
- JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
- SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level, TickCounter* tick_counter,
- Linkage* linkage, ObserveNodeManager* observe_node_manager)
+SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
+ Zone* zone,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins,
+ TickCounter* tick_counter,
+ Linkage* linkage,
+ ObserveNodeManager* observe_node_manager)
: jsgraph_(jsgraph),
broker_(broker),
zone_(zone),
type_cache_(TypeCache::Get()),
source_positions_(source_positions),
node_origins_(node_origins),
- poisoning_level_(poisoning_level),
tick_counter_(tick_counter),
linkage_(linkage),
observe_node_manager_(observe_node_manager) {}
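
With the poisoning level removed, the CheckBounds lowering above elides a bounds check purely from type information: the check is redundant whenever the index's range is known to lie inside [0, minimum length). A tiny worked example of that range test; Range and BoundsCheckRedundant are illustrative names.

// Sketch of the "bounds check is redundant" test: if the index's type range
// already lies inside [0, minimum possible length), no runtime check is needed.
#include <iostream>

struct Range { double min, max; };  // stand-in for a TurboFan range type

bool BoundsCheckRedundant(const Range& index, const Range& length) {
  return index.min >= 0.0 && index.max < length.min;
}

int main() {
  Range length{10, 16};  // e.g. an array known to have at least 10 elements
  std::cout << std::boolalpha
            << BoundsCheckRedundant({0, 9}, length) << "\n"    // true
            << BoundsCheckRedundant({0, 12}, length) << "\n";  // false
  return 0;
}
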
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 54017b34f7..f60bc1a7e3 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -31,7 +31,6 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
SourcePositionTable* source_position,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level,
TickCounter* tick_counter, Linkage* linkage,
ObserveNodeManager* observe_node_manager = nullptr);
~SimplifiedLowering() = default;
@@ -83,8 +82,6 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SourcePositionTable* source_positions_;
NodeOriginTable* node_origins_;
- PoisoningMitigationLevel poisoning_level_;
-
TickCounter* const tick_counter_;
Linkage* const linkage_;
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 9c4f8f083a..9461194b55 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -73,22 +73,6 @@ size_t hash_value(FieldAccess const& access) {
access.is_store_in_literal);
}
-size_t hash_value(LoadSensitivity load_sensitivity) {
- return static_cast<size_t>(load_sensitivity);
-}
-
-std::ostream& operator<<(std::ostream& os, LoadSensitivity load_sensitivity) {
- switch (load_sensitivity) {
- case LoadSensitivity::kCritical:
- return os << "Critical";
- case LoadSensitivity::kSafe:
- return os << "Safe";
- case LoadSensitivity::kUnsafe:
- return os << "Unsafe";
- }
- UNREACHABLE();
-}
-
std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
os << "[" << access.base_is_tagged << ", " << access.offset << ", ";
#ifdef OBJECT_PRINT
@@ -107,9 +91,6 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
if (access.is_store_in_literal) {
os << " (store in literal)";
}
- if (FLAG_untrusted_code_mitigations) {
- os << ", " << access.load_sensitivity;
- }
os << "]";
return os;
}
@@ -145,9 +126,6 @@ std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
os << access.base_is_tagged << ", " << access.header_size << ", "
<< access.type << ", " << access.machine_type << ", "
<< access.write_barrier_kind;
- if (FLAG_untrusted_code_mitigations) {
- os << ", " << access.load_sensitivity;
- }
return os;
}
@@ -719,129 +697,128 @@ bool operator==(CheckMinusZeroParameters const& lhs,
return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
}
-#define PURE_OP_LIST(V) \
- V(BooleanNot, Operator::kNoProperties, 1, 0) \
- V(NumberEqual, Operator::kCommutative, 2, 0) \
- V(NumberLessThan, Operator::kNoProperties, 2, 0) \
- V(NumberLessThanOrEqual, Operator::kNoProperties, 2, 0) \
- V(NumberAdd, Operator::kCommutative, 2, 0) \
- V(NumberSubtract, Operator::kNoProperties, 2, 0) \
- V(NumberMultiply, Operator::kCommutative, 2, 0) \
- V(NumberDivide, Operator::kNoProperties, 2, 0) \
- V(NumberModulus, Operator::kNoProperties, 2, 0) \
- V(NumberBitwiseOr, Operator::kCommutative, 2, 0) \
- V(NumberBitwiseXor, Operator::kCommutative, 2, 0) \
- V(NumberBitwiseAnd, Operator::kCommutative, 2, 0) \
- V(NumberShiftLeft, Operator::kNoProperties, 2, 0) \
- V(NumberShiftRight, Operator::kNoProperties, 2, 0) \
- V(NumberShiftRightLogical, Operator::kNoProperties, 2, 0) \
- V(NumberImul, Operator::kCommutative, 2, 0) \
- V(NumberAbs, Operator::kNoProperties, 1, 0) \
- V(NumberClz32, Operator::kNoProperties, 1, 0) \
- V(NumberCeil, Operator::kNoProperties, 1, 0) \
- V(NumberFloor, Operator::kNoProperties, 1, 0) \
- V(NumberFround, Operator::kNoProperties, 1, 0) \
- V(NumberAcos, Operator::kNoProperties, 1, 0) \
- V(NumberAcosh, Operator::kNoProperties, 1, 0) \
- V(NumberAsin, Operator::kNoProperties, 1, 0) \
- V(NumberAsinh, Operator::kNoProperties, 1, 0) \
- V(NumberAtan, Operator::kNoProperties, 1, 0) \
- V(NumberAtan2, Operator::kNoProperties, 2, 0) \
- V(NumberAtanh, Operator::kNoProperties, 1, 0) \
- V(NumberCbrt, Operator::kNoProperties, 1, 0) \
- V(NumberCos, Operator::kNoProperties, 1, 0) \
- V(NumberCosh, Operator::kNoProperties, 1, 0) \
- V(NumberExp, Operator::kNoProperties, 1, 0) \
- V(NumberExpm1, Operator::kNoProperties, 1, 0) \
- V(NumberLog, Operator::kNoProperties, 1, 0) \
- V(NumberLog1p, Operator::kNoProperties, 1, 0) \
- V(NumberLog10, Operator::kNoProperties, 1, 0) \
- V(NumberLog2, Operator::kNoProperties, 1, 0) \
- V(NumberMax, Operator::kNoProperties, 2, 0) \
- V(NumberMin, Operator::kNoProperties, 2, 0) \
- V(NumberPow, Operator::kNoProperties, 2, 0) \
- V(NumberRound, Operator::kNoProperties, 1, 0) \
- V(NumberSign, Operator::kNoProperties, 1, 0) \
- V(NumberSin, Operator::kNoProperties, 1, 0) \
- V(NumberSinh, Operator::kNoProperties, 1, 0) \
- V(NumberSqrt, Operator::kNoProperties, 1, 0) \
- V(NumberTan, Operator::kNoProperties, 1, 0) \
- V(NumberTanh, Operator::kNoProperties, 1, 0) \
- V(NumberTrunc, Operator::kNoProperties, 1, 0) \
- V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
- V(NumberToInt32, Operator::kNoProperties, 1, 0) \
- V(NumberToString, Operator::kNoProperties, 1, 0) \
- V(NumberToUint32, Operator::kNoProperties, 1, 0) \
- V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
- V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
- V(BigIntNegate, Operator::kNoProperties, 1, 0) \
- V(StringConcat, Operator::kNoProperties, 3, 0) \
- V(StringToNumber, Operator::kNoProperties, 1, 0) \
- V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
- V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \
- V(StringIndexOf, Operator::kNoProperties, 3, 0) \
- V(StringLength, Operator::kNoProperties, 1, 0) \
- V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
- V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
- V(TypeOf, Operator::kNoProperties, 1, 1) \
- V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
- V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
- V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToInt64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \
- V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
- V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
- V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
- V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
- V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \
- V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
- V(ObjectIsArrayBufferView, Operator::kNoProperties, 1, 0) \
- V(ObjectIsBigInt, Operator::kNoProperties, 1, 0) \
- V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsConstructor, Operator::kNoProperties, 1, 0) \
- V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsMinusZero, Operator::kNoProperties, 1, 0) \
- V(NumberIsMinusZero, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNaN, Operator::kNoProperties, 1, 0) \
- V(NumberIsNaN, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \
- V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \
- V(ObjectIsString, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
- V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
- V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \
- V(NumberIsFinite, Operator::kNoProperties, 1, 0) \
- V(ObjectIsFiniteNumber, Operator::kNoProperties, 1, 0) \
- V(NumberIsInteger, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSafeInteger, Operator::kNoProperties, 1, 0) \
- V(NumberIsSafeInteger, Operator::kNoProperties, 1, 0) \
- V(ObjectIsInteger, Operator::kNoProperties, 1, 0) \
- V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
- V(SameValue, Operator::kCommutative, 2, 0) \
- V(SameValueNumbersOnly, Operator::kCommutative, 2, 0) \
- V(NumberSameValue, Operator::kCommutative, 2, 0) \
- V(ReferenceEqual, Operator::kCommutative, 2, 0) \
- V(StringEqual, Operator::kCommutative, 2, 0) \
- V(StringLessThan, Operator::kNoProperties, 2, 0) \
- V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \
- V(ToBoolean, Operator::kNoProperties, 1, 0) \
- V(NewConsString, Operator::kNoProperties, 3, 0) \
- V(PoisonIndex, Operator::kNoProperties, 1, 0)
+#define PURE_OP_LIST(V) \
+ V(BooleanNot, Operator::kNoProperties, 1, 0) \
+ V(NumberEqual, Operator::kCommutative, 2, 0) \
+ V(NumberLessThan, Operator::kNoProperties, 2, 0) \
+ V(NumberLessThanOrEqual, Operator::kNoProperties, 2, 0) \
+ V(NumberAdd, Operator::kCommutative, 2, 0) \
+ V(NumberSubtract, Operator::kNoProperties, 2, 0) \
+ V(NumberMultiply, Operator::kCommutative, 2, 0) \
+ V(NumberDivide, Operator::kNoProperties, 2, 0) \
+ V(NumberModulus, Operator::kNoProperties, 2, 0) \
+ V(NumberBitwiseOr, Operator::kCommutative, 2, 0) \
+ V(NumberBitwiseXor, Operator::kCommutative, 2, 0) \
+ V(NumberBitwiseAnd, Operator::kCommutative, 2, 0) \
+ V(NumberShiftLeft, Operator::kNoProperties, 2, 0) \
+ V(NumberShiftRight, Operator::kNoProperties, 2, 0) \
+ V(NumberShiftRightLogical, Operator::kNoProperties, 2, 0) \
+ V(NumberImul, Operator::kCommutative, 2, 0) \
+ V(NumberAbs, Operator::kNoProperties, 1, 0) \
+ V(NumberClz32, Operator::kNoProperties, 1, 0) \
+ V(NumberCeil, Operator::kNoProperties, 1, 0) \
+ V(NumberFloor, Operator::kNoProperties, 1, 0) \
+ V(NumberFround, Operator::kNoProperties, 1, 0) \
+ V(NumberAcos, Operator::kNoProperties, 1, 0) \
+ V(NumberAcosh, Operator::kNoProperties, 1, 0) \
+ V(NumberAsin, Operator::kNoProperties, 1, 0) \
+ V(NumberAsinh, Operator::kNoProperties, 1, 0) \
+ V(NumberAtan, Operator::kNoProperties, 1, 0) \
+ V(NumberAtan2, Operator::kNoProperties, 2, 0) \
+ V(NumberAtanh, Operator::kNoProperties, 1, 0) \
+ V(NumberCbrt, Operator::kNoProperties, 1, 0) \
+ V(NumberCos, Operator::kNoProperties, 1, 0) \
+ V(NumberCosh, Operator::kNoProperties, 1, 0) \
+ V(NumberExp, Operator::kNoProperties, 1, 0) \
+ V(NumberExpm1, Operator::kNoProperties, 1, 0) \
+ V(NumberLog, Operator::kNoProperties, 1, 0) \
+ V(NumberLog1p, Operator::kNoProperties, 1, 0) \
+ V(NumberLog10, Operator::kNoProperties, 1, 0) \
+ V(NumberLog2, Operator::kNoProperties, 1, 0) \
+ V(NumberMax, Operator::kNoProperties, 2, 0) \
+ V(NumberMin, Operator::kNoProperties, 2, 0) \
+ V(NumberPow, Operator::kNoProperties, 2, 0) \
+ V(NumberRound, Operator::kNoProperties, 1, 0) \
+ V(NumberSign, Operator::kNoProperties, 1, 0) \
+ V(NumberSin, Operator::kNoProperties, 1, 0) \
+ V(NumberSinh, Operator::kNoProperties, 1, 0) \
+ V(NumberSqrt, Operator::kNoProperties, 1, 0) \
+ V(NumberTan, Operator::kNoProperties, 1, 0) \
+ V(NumberTanh, Operator::kNoProperties, 1, 0) \
+ V(NumberTrunc, Operator::kNoProperties, 1, 0) \
+ V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
+ V(NumberToInt32, Operator::kNoProperties, 1, 0) \
+ V(NumberToString, Operator::kNoProperties, 1, 0) \
+ V(NumberToUint32, Operator::kNoProperties, 1, 0) \
+ V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
+ V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
+ V(BigIntNegate, Operator::kNoProperties, 1, 0) \
+ V(StringConcat, Operator::kNoProperties, 3, 0) \
+ V(StringToNumber, Operator::kNoProperties, 1, 0) \
+ V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
+ V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \
+ V(StringIndexOf, Operator::kNoProperties, 3, 0) \
+ V(StringLength, Operator::kNoProperties, 1, 0) \
+ V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
+ V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
+ V(TypeOf, Operator::kNoProperties, 1, 1) \
+ V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
+ V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
+ V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToInt64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \
+ V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
+ V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
+ V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsArrayBufferView, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsBigInt, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsConstructor, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsMinusZero, Operator::kNoProperties, 1, 0) \
+ V(NumberIsMinusZero, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNaN, Operator::kNoProperties, 1, 0) \
+ V(NumberIsNaN, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsString, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
+ V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \
+ V(NumberIsFinite, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsFiniteNumber, Operator::kNoProperties, 1, 0) \
+ V(NumberIsInteger, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSafeInteger, Operator::kNoProperties, 1, 0) \
+ V(NumberIsSafeInteger, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsInteger, Operator::kNoProperties, 1, 0) \
+ V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
+ V(SameValue, Operator::kCommutative, 2, 0) \
+ V(SameValueNumbersOnly, Operator::kCommutative, 2, 0) \
+ V(NumberSameValue, Operator::kCommutative, 2, 0) \
+ V(ReferenceEqual, Operator::kCommutative, 2, 0) \
+ V(StringEqual, Operator::kCommutative, 2, 0) \
+ V(StringLessThan, Operator::kNoProperties, 2, 0) \
+ V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \
+ V(ToBoolean, Operator::kNoProperties, 1, 0) \
+ V(NewConsString, Operator::kNoProperties, 3, 0)
#define EFFECT_DEPENDENT_OP_LIST(V) \
V(BigIntAdd, Operator::kNoProperties, 2, 1) \
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index d7a5901448..0602b795a9 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -46,10 +46,6 @@ size_t hash_value(BaseTaggedness);
std::ostream& operator<<(std::ostream&, BaseTaggedness);
-size_t hash_value(LoadSensitivity);
-
-std::ostream& operator<<(std::ostream&, LoadSensitivity);
-
struct ConstFieldInfo {
// the map that introduced the const field, if any. An access is considered
// mutable iff the handle is null.
@@ -82,7 +78,6 @@ struct FieldAccess {
Type type; // type of the field.
MachineType machine_type; // machine type of the field.
WriteBarrierKind write_barrier_kind; // write barrier hint.
- LoadSensitivity load_sensitivity; // load safety for poisoning.
ConstFieldInfo const_field_info; // the constness of this access, and the
// field owner map, if the access is const
bool is_store_in_literal; // originates from a kStoreInLiteral access
@@ -96,14 +91,12 @@ struct FieldAccess {
type(Type::None()),
machine_type(MachineType::None()),
write_barrier_kind(kFullWriteBarrier),
- load_sensitivity(LoadSensitivity::kUnsafe),
const_field_info(ConstFieldInfo::None()),
is_store_in_literal(false) {}
FieldAccess(BaseTaggedness base_is_tagged, int offset, MaybeHandle<Name> name,
MaybeHandle<Map> map, Type type, MachineType machine_type,
WriteBarrierKind write_barrier_kind,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe,
ConstFieldInfo const_field_info = ConstFieldInfo::None(),
bool is_store_in_literal = false
#ifdef V8_HEAP_SANDBOX
@@ -118,7 +111,6 @@ struct FieldAccess {
type(type),
machine_type(machine_type),
write_barrier_kind(write_barrier_kind),
- load_sensitivity(load_sensitivity),
const_field_info(const_field_info),
is_store_in_literal(is_store_in_literal)
#ifdef V8_HEAP_SANDBOX
@@ -162,25 +154,21 @@ struct ElementAccess {
Type type; // type of the element.
MachineType machine_type; // machine type of the element.
WriteBarrierKind write_barrier_kind; // write barrier hint.
- LoadSensitivity load_sensitivity; // load safety for poisoning.
ElementAccess()
: base_is_tagged(kTaggedBase),
header_size(0),
type(Type::None()),
machine_type(MachineType::None()),
- write_barrier_kind(kFullWriteBarrier),
- load_sensitivity(LoadSensitivity::kUnsafe) {}
+ write_barrier_kind(kFullWriteBarrier) {}
ElementAccess(BaseTaggedness base_is_tagged, int header_size, Type type,
- MachineType machine_type, WriteBarrierKind write_barrier_kind,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe)
+ MachineType machine_type, WriteBarrierKind write_barrier_kind)
: base_is_tagged(base_is_tagged),
header_size(header_size),
type(type),
machine_type(machine_type),
- write_barrier_kind(write_barrier_kind),
- load_sensitivity(load_sensitivity) {}
+ write_barrier_kind(write_barrier_kind) {}
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
@@ -926,7 +914,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* TruncateTaggedToBit();
const Operator* TruncateTaggedPointerToBit();
- const Operator* PoisonIndex();
const Operator* CompareMaps(ZoneHandleSet<Map>);
const Operator* MapGuard(ZoneHandleSet<Map> maps);
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index ce9b6fdb18..5025233c88 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -814,9 +814,9 @@ Reduction TypedOptimization::ReduceJSToNumberInput(Node* input) {
HeapObjectMatcher m(input);
if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
- double number;
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
- return Replace(jsgraph()->Constant(number));
+ base::Optional<double> number = input_value.ToNumber();
+ if (!number.has_value()) return NoChange();
+ return Replace(jsgraph()->Constant(number.value()));
}
}
if (input_type.IsHeapConstant()) {
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 529f1cc7bb..a96d1ea981 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -882,9 +882,10 @@ bool Typer::Visitor::InductionVariablePhiTypeIsPrefixedPoint(
InductionVariable* induction_var) {
Node* node = induction_var->phi();
DCHECK_EQ(node->opcode(), IrOpcode::kInductionVariablePhi);
+ Node* arith = node->InputAt(1);
Type type = NodeProperties::GetType(node);
Type initial_type = Operand(node, 0);
- Node* arith = node->InputAt(1);
+ Type arith_type = Operand(node, 1);
Type increment_type = Operand(node, 2);
// Intersect {type} with useful bounds.
@@ -910,26 +911,30 @@ bool Typer::Visitor::InductionVariablePhiTypeIsPrefixedPoint(
type = Type::Intersect(type, bound_type, typer_->zone());
}
- // Apply ordinary typing to the "increment" operation.
- // clang-format off
- switch (arith->opcode()) {
+ if (arith_type.IsNone()) {
+ type = Type::None();
+ } else {
+ // Apply ordinary typing to the "increment" operation.
+ // clang-format off
+ switch (arith->opcode()) {
#define CASE(x) \
- case IrOpcode::k##x: \
- type = Type##x(type, increment_type); \
- break;
- CASE(JSAdd)
- CASE(JSSubtract)
- CASE(NumberAdd)
- CASE(NumberSubtract)
- CASE(SpeculativeNumberAdd)
- CASE(SpeculativeNumberSubtract)
- CASE(SpeculativeSafeIntegerAdd)
- CASE(SpeculativeSafeIntegerSubtract)
+ case IrOpcode::k##x: \
+ type = Type##x(type, increment_type); \
+ break;
+ CASE(JSAdd)
+ CASE(JSSubtract)
+ CASE(NumberAdd)
+ CASE(NumberSubtract)
+ CASE(SpeculativeNumberAdd)
+ CASE(SpeculativeNumberSubtract)
+ CASE(SpeculativeSafeIntegerAdd)
+ CASE(SpeculativeSafeIntegerSubtract)
#undef CASE
- default:
- UNREACHABLE();
+ default:
+ UNREACHABLE();
+ }
+ // clang-format on
}
- // clang-format on
type = Type::Union(initial_type, type, typer_->zone());
@@ -2065,10 +2070,6 @@ Type Typer::Visitor::TypeStringLength(Node* node) {
Type Typer::Visitor::TypeStringSubstring(Node* node) { return Type::String(); }
-Type Typer::Visitor::TypePoisonIndex(Node* node) {
- return Type::Union(Operand(node, 0), typer_->cache_->kSingletonZero, zone());
-}
-
Type Typer::Visitor::TypeCheckBounds(Node* node) {
return typer_->operation_typer_.CheckBounds(Operand(node, 0),
Operand(node, 1));
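
The typer hunk above guards the induction-variable step against a None-typed increment operation: the increment contribution is skipped, so after the final union the phi keeps only its initial type. A small interval-arithmetic sketch of that union-with-increment step, using std::optional as the stand-in for None; Interval, Add, and Union are illustrative names.

// Sketch of the induction-variable typing step: the phi's type is the union
// of its initial type with (phi_type + increment), unless the arithmetic
// node's type is None, in which case that contribution is dropped.
#include <algorithm>
#include <iostream>
#include <optional>

struct Interval { double min, max; };  // a simple range type
using Type = std::optional<Interval>;  // nullopt plays the role of None

Type Add(const Type& a, const Type& b) {
  if (!a || !b) return std::nullopt;  // anything + None is None
  return Interval{a->min + b->min, a->max + b->max};
}

Type Union(const Type& a, const Type& b) {
  if (!a) return b;
  if (!b) return a;
  return Interval{std::min(a->min, b->min), std::max(a->max, b->max)};
}

int main() {
  Type initial = Interval{0, 0};
  Type increment = Interval{1, 1};
  Type phi = Interval{0, 10};

  // Normal case: widen the phi by one increment step, then union with initial.
  Type stepped = Union(initial, Add(phi, increment));

  // Guarded case: a None-typed increment contributes nothing, so the phi
  // keeps only its initial type after the union.
  Type guarded = Union(initial, std::nullopt);

  std::cout << "stepped: [" << stepped->min << ", " << stepped->max << "]\n";
  std::cout << "guarded: [" << guarded->min << ", " << guarded->max << "]\n";
  return 0;
}
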
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index f33edaa6c0..a0f2aa569d 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -1422,10 +1422,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 1, TypeCache::Get()->kPositiveSafeInteger);
CheckTypeIs(node, TypeCache::Get()->kPositiveSafeInteger);
break;
- case IrOpcode::kPoisonIndex:
- CheckValueInputIs(node, 0, Type::Unsigned32());
- CheckTypeIs(node, Type::Unsigned32());
- break;
case IrOpcode::kCheckClosure:
// Any -> Function
CheckValueInputIs(node, 0, Type::Any());
@@ -1641,7 +1637,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// -----------------------
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
- case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kStore:
@@ -1817,9 +1812,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord32PairShl:
case IrOpcode::kWord32PairShr:
case IrOpcode::kWord32PairSar:
- case IrOpcode::kTaggedPoisonOnSpeculation:
- case IrOpcode::kWord32PoisonOnSpeculation:
- case IrOpcode::kWord64PoisonOnSpeculation:
case IrOpcode::kLoadStackCheckOffset:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index f91c21fd1d..f6f6c3844f 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -44,6 +44,7 @@
#include "src/roots/roots.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/code-space-access.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/graph-builder-interface.h"
@@ -196,14 +197,7 @@ class WasmGraphAssembler : public GraphAssembler {
return Call(call_descriptor, call_target, args...);
}
- void EnsureEnd() {
- if (graph()->end() == nullptr) {
- graph()->SetEnd(graph()->NewNode(mcgraph()->common()->End(0)));
- }
- }
-
void MergeControlToEnd(Node* node) {
- EnsureEnd();
NodeProperties::MergeControlToEnd(graph(), mcgraph()->common(), node);
}
@@ -212,7 +206,6 @@ class WasmGraphAssembler : public GraphAssembler {
if (FLAG_debug_code) {
auto ok = MakeLabel();
GotoIfNot(condition, &ok);
- EnsureEnd();
Unreachable();
Bind(&ok);
}
@@ -472,7 +465,6 @@ WasmGraphBuilder::WasmGraphBuilder(
mcgraph_(mcgraph),
env_(env),
has_simd_(ContainsSimd(sig)),
- untrusted_code_mitigations_(FLAG_untrusted_code_mitigations),
sig_(sig),
source_position_table_(source_position_table),
isolate_(isolate) {
@@ -501,6 +493,8 @@ void WasmGraphBuilder::Start(unsigned params) {
gasm_->LoadFunctionDataFromJSFunction(
Param(Linkage::kJSCallClosureParamIndex, "%closure")))
: Param(wasm::kWasmInstanceParameterIndex);
+
+ graph()->SetEnd(graph()->NewNode(mcgraph()->common()->End(0)));
}
Node* WasmGraphBuilder::Param(int index, const char* debug_name) {
@@ -2901,13 +2895,13 @@ Node* WasmGraphBuilder::BuildCallNode(const wasm::FunctionSig* sig,
return call;
}
-Node* WasmGraphBuilder::BuildWasmCall(
- const wasm::FunctionSig* sig, base::Vector<Node*> args,
- base::Vector<Node*> rets, wasm::WasmCodePosition position,
- Node* instance_node, UseRetpoline use_retpoline, Node* frame_state) {
- CallDescriptor* call_descriptor =
- GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline,
- kWasmFunction, frame_state != nullptr);
+Node* WasmGraphBuilder::BuildWasmCall(const wasm::FunctionSig* sig,
+ base::Vector<Node*> args,
+ base::Vector<Node*> rets,
+ wasm::WasmCodePosition position,
+ Node* instance_node, Node* frame_state) {
+ CallDescriptor* call_descriptor = GetWasmCallDescriptor(
+ mcgraph()->zone(), sig, kWasmFunction, frame_state != nullptr);
const Operator* op = mcgraph()->common()->Call(call_descriptor);
Node* call =
BuildCallNode(sig, args, position, instance_node, op, frame_state);
@@ -2935,10 +2929,9 @@ Node* WasmGraphBuilder::BuildWasmCall(
Node* WasmGraphBuilder::BuildWasmReturnCall(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
wasm::WasmCodePosition position,
- Node* instance_node,
- UseRetpoline use_retpoline) {
+ Node* instance_node) {
CallDescriptor* call_descriptor =
- GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline);
+ GetWasmCallDescriptor(mcgraph()->zone(), sig);
const Operator* op = mcgraph()->common()->TailCall(call_descriptor);
Node* call = BuildCallNode(sig, args, position, instance_node, op);
@@ -2982,15 +2975,13 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
Node* target_node = gasm_->LoadFromObject(
MachineType::Pointer(), imported_targets, func_index_times_pointersize);
args[0] = target_node;
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
switch (continuation) {
case kCallContinues:
- return BuildWasmCall(sig, args, rets, position, ref_node, use_retpoline);
+ return BuildWasmCall(sig, args, rets, position, ref_node);
case kReturnCall:
DCHECK(rets.empty());
- return BuildWasmReturnCall(sig, args, position, ref_node, use_retpoline);
+ return BuildWasmReturnCall(sig, args, position, ref_node);
}
}
@@ -3010,7 +3001,7 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, base::Vector<Node*> args,
Address code = static_cast<Address>(index);
args[0] = mcgraph()->RelocatableIntPtrConstant(code, RelocInfo::WASM_CALL);
- return BuildWasmCall(sig, args, rets, position, nullptr, kNoRetpoline);
+ return BuildWasmCall(sig, args, rets, position, nullptr);
}
Node* WasmGraphBuilder::CallIndirect(uint32_t table_index, uint32_t sig_index,
@@ -3095,16 +3086,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
Node* in_bounds = gasm_->Uint32LessThan(key, ift_size);
TrapIfFalse(wasm::kTrapTableOutOfBounds, in_bounds, position);
- // Mask the key to prevent SSCA.
- if (untrusted_code_mitigations_) {
- // mask = ((key - size) & ~key) >> 31
- Node* neg_key = gasm_->Word32Xor(key, Int32Constant(-1));
- Node* masked_diff =
- gasm_->Word32And(gasm_->Int32Sub(key, ift_size), neg_key);
- Node* mask = gasm_->Word32Sar(masked_diff, Int32Constant(31));
- key = gasm_->Word32And(key, mask);
- }
-
const wasm::ValueType table_type = env_->module->tables[table_index].type;
// Check that the table entry is not null and that the type of the function is
// **identical with** the function type declared at the call site (no
@@ -3140,16 +3121,12 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
intptr_scaled_key);
args[0] = target;
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
switch (continuation) {
case kCallContinues:
- return BuildWasmCall(sig, args, rets, position, target_instance,
- use_retpoline);
+ return BuildWasmCall(sig, args, rets, position, target_instance);
case kReturnCall:
- return BuildWasmReturnCall(sig, args, position, target_instance,
- use_retpoline);
+ return BuildWasmReturnCall(sig, args, position, target_instance);
}
}
@@ -3244,14 +3221,9 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index,
args[0] = end_label.PhiAt(0);
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
-
Node* call = continuation == kCallContinues
- ? BuildWasmCall(sig, args, rets, position, instance_node,
- use_retpoline)
- : BuildWasmReturnCall(sig, args, position, instance_node,
- use_retpoline);
+ ? BuildWasmCall(sig, args, rets, position, instance_node)
+ : BuildWasmReturnCall(sig, args, position, instance_node);
return call;
}
@@ -3287,7 +3259,7 @@ Node* WasmGraphBuilder::ReturnCall(uint32_t index, base::Vector<Node*> args,
Address code = static_cast<Address>(index);
args[0] = mcgraph()->RelocatableIntPtrConstant(code, RelocInfo::WASM_CALL);
- return BuildWasmReturnCall(sig, args, position, nullptr, kNoRetpoline);
+ return BuildWasmReturnCall(sig, args, position, nullptr);
}
Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index,
@@ -3416,15 +3388,6 @@ void WasmGraphBuilder::InitInstanceCache(
// Load the memory size.
instance_cache->mem_size =
LOAD_MUTABLE_INSTANCE_FIELD(MemorySize, MachineType::UintPtr());
-
- if (untrusted_code_mitigations_) {
- // Load the memory mask.
- instance_cache->mem_mask =
- LOAD_INSTANCE_FIELD(MemoryMask, MachineType::UintPtr());
- } else {
- // Explicitly set to nullptr to ensure a SEGV when we try to use it.
- instance_cache->mem_mask = nullptr;
- }
}
void WasmGraphBuilder::PrepareInstanceCacheForLoop(
@@ -3435,10 +3398,6 @@ void WasmGraphBuilder::PrepareInstanceCacheForLoop(
INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
INTRODUCE_PHI(mem_size, MachineType::PointerRepresentation());
- if (untrusted_code_mitigations_) {
- INTRODUCE_PHI(mem_mask, MachineType::PointerRepresentation());
- }
-
#undef INTRODUCE_PHI
}
@@ -3453,10 +3412,6 @@ void WasmGraphBuilder::NewInstanceCacheMerge(WasmInstanceCacheNodes* to,
INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32);
- if (untrusted_code_mitigations_) {
- INTRODUCE_PHI(mem_mask, MachineRepresentation::kWord32);
- }
-
#undef INTRODUCE_PHI
}
@@ -3467,10 +3422,6 @@ void WasmGraphBuilder::MergeInstanceCacheInto(WasmInstanceCacheNodes* to,
merge, to->mem_size, from->mem_size);
to->mem_start = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
merge, to->mem_start, from->mem_start);
- if (untrusted_code_mitigations_) {
- to->mem_mask = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
- merge, to->mem_mask, from->mem_mask);
- }
}
Node* WasmGraphBuilder::CreateOrMergeIntoPhi(MachineRepresentation rep,
@@ -3839,13 +3790,6 @@ WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
// Introduce the actual bounds check.
Node* cond = gasm_->UintLessThan(index, effective_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
-
- if (untrusted_code_mitigations_) {
- // In the fallthrough case, condition the index with the memory mask.
- Node* mem_mask = instance_cache_->mem_mask;
- DCHECK_NOT_NULL(mem_mask);
- index = gasm_->WordAnd(index, mem_mask);
- }
return {index, kDynamicallyChecked};
}
@@ -4345,13 +4289,6 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
gasm_->UintLessThan(index, mem_size), BranchHint::kTrue);
bounds_check.Chain(control());
- if (untrusted_code_mitigations_) {
- // Condition the index with the memory mask.
- Node* mem_mask = instance_cache_->mem_mask;
- DCHECK_NOT_NULL(mem_mask);
- index = gasm_->WordAnd(index, mem_mask);
- }
-
Node* load = graph()->NewNode(mcgraph()->machine()->Load(type), mem_start,
index, effect(), bounds_check.if_true);
SetEffectControl(bounds_check.EffectPhi(load, effect()), bounds_check.merge);
@@ -4396,13 +4333,6 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
BranchHint::kTrue);
bounds_check.Chain(control());
- if (untrusted_code_mitigations_) {
- // Condition the index with the memory mask.
- Node* mem_mask = instance_cache_->mem_mask;
- DCHECK_NOT_NULL(mem_mask);
- index = gasm_->Word32And(index, mem_mask);
- }
-
index = BuildChangeUint32ToUintPtr(index);
const Operator* store_op = mcgraph()->machine()->Store(StoreRepresentation(
type.representation(), WriteBarrierKind::kNoWriteBarrier));
@@ -5240,16 +5170,26 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
const Operator* (MachineOperatorBuilder::*)(MachineType);
using OperatorByRep =
const Operator* (MachineOperatorBuilder::*)(MachineRepresentation);
+ using OperatorByAtomicLoadRep =
+ const Operator* (MachineOperatorBuilder::*)(AtomicLoadParameters);
+ using OperatorByAtomicStoreRep =
+ const Operator* (MachineOperatorBuilder::*)(AtomicStoreParameters);
const Type type;
const MachineType machine_type;
const OperatorByType operator_by_type = nullptr;
const OperatorByRep operator_by_rep = nullptr;
+ const OperatorByAtomicLoadRep operator_by_atomic_load_params = nullptr;
+ const OperatorByAtomicStoreRep operator_by_atomic_store_rep = nullptr;
constexpr AtomicOpInfo(Type t, MachineType m, OperatorByType o)
: type(t), machine_type(m), operator_by_type(o) {}
constexpr AtomicOpInfo(Type t, MachineType m, OperatorByRep o)
: type(t), machine_type(m), operator_by_rep(o) {}
+ constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicLoadRep o)
+ : type(t), machine_type(m), operator_by_atomic_load_params(o) {}
+ constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicStoreRep o)
+ : type(t), machine_type(m), operator_by_atomic_store_rep(o) {}
// Constexpr, hence just a table lookup in most compilers.
static constexpr AtomicOpInfo Get(wasm::WasmOpcode opcode) {
@@ -5358,11 +5298,21 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
uintptr_t capped_offset = static_cast<uintptr_t>(offset);
if (info.type != AtomicOpInfo::kSpecial) {
- const Operator* op =
- info.operator_by_type
- ? (mcgraph()->machine()->*info.operator_by_type)(info.machine_type)
- : (mcgraph()->machine()->*info.operator_by_rep)(
- info.machine_type.representation());
+ const Operator* op;
+ if (info.operator_by_type) {
+ op = (mcgraph()->machine()->*info.operator_by_type)(info.machine_type);
+ } else if (info.operator_by_rep) {
+ op = (mcgraph()->machine()->*info.operator_by_rep)(
+ info.machine_type.representation());
+ } else if (info.operator_by_atomic_load_params) {
+ op = (mcgraph()->machine()->*info.operator_by_atomic_load_params)(
+ AtomicLoadParameters(info.machine_type, AtomicMemoryOrder::kSeqCst));
+ } else {
+ op = (mcgraph()->machine()->*info.operator_by_atomic_store_rep)(
+ AtomicStoreParameters(info.machine_type.representation(),
+ WriteBarrierKind::kNoWriteBarrier,
+ AtomicMemoryOrder::kSeqCst));
+ }
Node* input_nodes[6] = {MemBuffer(capped_offset), index};
int num_actual_inputs = info.type;
@@ -5610,13 +5560,16 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
wasm::WasmCodePosition position) {
TrapIfFalse(wasm::kTrapArrayTooLarge,
gasm_->Uint32LessThanOrEqual(
- length, gasm_->Uint32Constant(wasm::kV8MaxWasmArrayLength)),
+ length, gasm_->Uint32Constant(WasmArray::MaxLength(type))),
position);
wasm::ValueType element_type = type->element_type();
Builtin stub = ChooseArrayAllocationBuiltin(element_type, initial_value);
- Node* a =
- gasm_->CallBuiltin(stub, Operator::kEliminatable, rtt, length,
- Int32Constant(element_type.element_size_bytes()));
+ // Do NOT mark this as Operator::kEliminatable, because that would cause the
+ // Call node to have no control inputs, which means it could get scheduled
+ // before the check/trap above.
+ Node* a = gasm_->CallBuiltin(
+ stub, Operator::kNoDeopt | Operator::kNoThrow, rtt, length,
+ Int32Constant(element_type.element_size_bytes()));
if (initial_value != nullptr) {
// TODO(manoskouk): If the loop is ever removed here, we have to update
// ArrayNewWithRtt() in graph-builder-interface.cc to not mark the current
@@ -5628,8 +5581,6 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
Node* element_size = Int32Constant(element_type.element_size_bytes());
Node* end_offset =
gasm_->Int32Add(start_offset, gasm_->Int32Mul(element_size, length));
- // Loops need the graph's end to have been set up.
- gasm_->EnsureEnd();
gasm_->Goto(&loop, start_offset);
gasm_->Bind(&loop);
{
@@ -6005,24 +5956,33 @@ Node* WasmGraphBuilder::ArrayLen(Node* array_object, CheckForNull null_check,
return gasm_->LoadWasmArrayLength(array_object);
}
-// TODO(7748): Change {CallBuiltin} to {BuildCCall}. Add an option to copy in a
-// loop for small array sizes. To find the length limit, run
-// test/mjsunit/wasm/array-copy-benchmark.js.
+// TODO(7748): Add an option to copy in a loop for small array sizes. To find
+// the length limit, run test/mjsunit/wasm/array-copy-benchmark.js.
void WasmGraphBuilder::ArrayCopy(Node* dst_array, Node* dst_index,
- Node* src_array, Node* src_index, Node* length,
+ CheckForNull dst_null_check, Node* src_array,
+ Node* src_index, CheckForNull src_null_check,
+ Node* length,
wasm::WasmCodePosition position) {
- // TODO(7748): Skip null checks when possible.
- TrapIfTrue(wasm::kTrapNullDereference, gasm_->WordEqual(dst_array, RefNull()),
- position);
- TrapIfTrue(wasm::kTrapNullDereference, gasm_->WordEqual(src_array, RefNull()),
- position);
+ if (dst_null_check == kWithNullCheck) {
+ TrapIfTrue(wasm::kTrapNullDereference,
+ gasm_->WordEqual(dst_array, RefNull()), position);
+ }
+ if (src_null_check == kWithNullCheck) {
+ TrapIfTrue(wasm::kTrapNullDereference,
+ gasm_->WordEqual(src_array, RefNull()), position);
+ }
BoundsCheckArrayCopy(dst_array, dst_index, length, position);
BoundsCheckArrayCopy(src_array, src_index, length, position);
- Operator::Properties copy_properties =
- Operator::kIdempotent | Operator::kNoThrow | Operator::kNoDeopt;
- // The builtin needs the int parameters first.
- gasm_->CallBuiltin(Builtin::kWasmArrayCopy, copy_properties, dst_index,
- src_index, length, dst_array, src_array);
+
+ Node* function =
+ gasm_->ExternalConstant(ExternalReference::wasm_array_copy());
+ MachineType arg_types[]{
+ MachineType::TaggedPointer(), MachineType::TaggedPointer(),
+ MachineType::Uint32(), MachineType::TaggedPointer(),
+ MachineType::Uint32(), MachineType::Uint32()};
+ MachineSignature sig(0, 6, arg_types);
+ BuildCCall(&sig, function, GetInstance(), dst_array, dst_index, src_array,
+ src_index, length);
}
// 1 bit V8 Smi tag, 31 bits V8 Smi shift, 1 bit i31ref high-bit truncation.
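The ArrayCopy hunk above replaces the Builtin::kWasmArrayCopy call with a direct C call through ExternalReference::wasm_array_copy(). As a minimal sketch, the C-side shape implied by the MachineSignature it builds (zero returns, six parameters: instance, destination array, destination index, source array, source index, length) would look roughly like the declaration below; the parameter names and the Address stand-in are illustrative assumptions, not taken from this patch.

#include <cstdint>

// Illustrative only: mirrors the MachineSignature(0, 6, arg_types) built in
// WasmGraphBuilder::ArrayCopy above. The real C entry point behind
// ExternalReference::wasm_array_copy() is defined elsewhere in V8 and may use
// different parameter names and types; Address here simply stands in for a
// tagged pointer passed as a machine word.
using Address = uintptr_t;

extern "C" void wasm_array_copy(Address instance,    // MachineType::TaggedPointer()
                                Address dst_array,   // MachineType::TaggedPointer()
                                uint32_t dst_index,  // MachineType::Uint32()
                                Address src_array,   // MachineType::TaggedPointer()
                                uint32_t src_index,  // MachineType::Uint32()
                                uint32_t length);    // MachineType::Uint32()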
@@ -6659,8 +6619,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// The (cached) call target is the jump table slot for that function.
args[0] = BuildLoadCallTargetFromExportedFunctionData(function_data);
BuildWasmCall(sig_, base::VectorOf(args), base::VectorOf(rets),
- wasm::kNoCodePosition, nullptr, kNoRetpoline,
- frame_state);
+ wasm::kNoCodePosition, nullptr, frame_state);
}
}
@@ -6929,8 +6888,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Convert wasm numbers to JS values.
pos = AddArgumentNodes(base::VectorOf(args), pos, wasm_count, sig_);
- args[pos++] = undefined_node; // new target
- args[pos++] = Int32Constant(wasm_count); // argument count
+ args[pos++] = undefined_node; // new target
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
args[pos++] = function_context;
args[pos++] = effect();
args[pos++] = control();
@@ -6957,8 +6917,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
for (int i = wasm_count; i < expected_arity; ++i) {
args[pos++] = undefined_node;
}
- args[pos++] = undefined_node; // new target
- args[pos++] = Int32Constant(wasm_count); // argument count
+ args[pos++] = undefined_node; // new target
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
Node* function_context =
gasm_->LoadContextFromJSFunction(callable_node);
@@ -6981,7 +6942,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] =
gasm_->GetBuiltinPointerTarget(Builtin::kCall_ReceiverIsAny);
args[pos++] = callable_node;
- args[pos++] = Int32Constant(wasm_count); // argument count
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
args[pos++] = undefined_node; // receiver
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -7162,8 +7124,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
int pos = 0;
args[pos++] = gasm_->GetBuiltinPointerTarget(Builtin::kCall_ReceiverIsAny);
args[pos++] = callable;
- args[pos++] = Int32Constant(wasm_count); // argument count
- args[pos++] = UndefinedValue(); // receiver
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
+ args[pos++] = UndefinedValue(); // receiver
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), CallTrampolineDescriptor{}, wasm_count + 1,
@@ -7457,7 +7420,7 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
}
- if (shared->internal_formal_parameter_count() ==
+ if (shared->internal_formal_parameter_count_without_receiver() ==
expected_sig->parameter_count()) {
return std::make_pair(WasmImportCallKind::kJSFunctionArityMatch,
callable);
@@ -7623,8 +7586,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
// Schedule and compile to machine code.
CallDescriptor* incoming =
- GetWasmCallDescriptor(&zone, sig, WasmGraphBuilder::kNoRetpoline,
- WasmCallKind::kWasmImportWrapper);
+ GetWasmCallDescriptor(&zone, sig, WasmCallKind::kWasmImportWrapper);
if (machine->Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
@@ -7665,8 +7627,7 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
// Run the compiler pipeline to generate machine code.
CallDescriptor* call_descriptor =
- GetWasmCallDescriptor(&zone, sig, WasmGraphBuilder::kNoRetpoline,
- WasmCallKind::kWasmCapiFunction);
+ GetWasmCallDescriptor(&zone, sig, WasmCallKind::kWasmCapiFunction);
if (mcgraph->machine()->Is32()) {
call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor);
}
@@ -7676,13 +7637,18 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
call_descriptor, mcgraph, CodeKind::WASM_TO_CAPI_FUNCTION,
wasm::WasmCode::kWasmToCapiWrapper, debug_name,
WasmStubAssemblerOptions(), source_positions);
- std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
- wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots,
- result.protected_instructions_data.as_vector(),
- result.source_positions.as_vector(), wasm::WasmCode::kWasmToCapiWrapper,
- wasm::ExecutionTier::kNone, wasm::kNoDebugging);
- return native_module->PublishCode(std::move(wasm_code));
+ wasm::WasmCode* published_code;
+ {
+ wasm::CodeSpaceWriteScope code_space_write_scope(native_module);
+ std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
+ wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), wasm::WasmCode::kWasmToCapiWrapper,
+ wasm::ExecutionTier::kNone, wasm::kNoDebugging);
+ published_code = native_module->PublishCode(std::move(wasm_code));
+ }
+ return published_code;
}
MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
@@ -7716,8 +7682,7 @@ MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
// Generate the call descriptor.
CallDescriptor* incoming =
- GetWasmCallDescriptor(zone.get(), sig, WasmGraphBuilder::kNoRetpoline,
- WasmCallKind::kWasmImportWrapper);
+ GetWasmCallDescriptor(zone.get(), sig, WasmCallKind::kWasmImportWrapper);
// Run the compilation job synchronously.
std::unique_ptr<OptimizedCompilationJob> job(
@@ -7851,9 +7816,10 @@ bool BuildGraphForWasmFunction(wasm::CompilationEnv* env,
WasmGraphBuilder builder(env, mcgraph->zone(), mcgraph, func_body.sig,
source_positions);
auto* allocator = wasm::GetWasmEngine()->allocator();
- wasm::VoidResult graph_construction_result = wasm::BuildTFGraph(
- allocator, env->enabled_features, env->module, &builder, detected,
- func_body, loop_infos, node_origins, func_index);
+ wasm::VoidResult graph_construction_result =
+ wasm::BuildTFGraph(allocator, env->enabled_features, env->module,
+ &builder, detected, func_body, loop_infos,
+ node_origins, func_index, wasm::kInstrumentEndpoints);
if (graph_construction_result.failed()) {
if (FLAG_trace_wasm_compiler) {
StdoutStream{} << "Compilation failed: "
@@ -7886,8 +7852,9 @@ base::Vector<const char> GetDebugName(Zone* zone, int index) {
} // namespace
wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
- wasm::CompilationEnv* env, const wasm::FunctionBody& func_body,
- int func_index, Counters* counters, wasm::WasmFeatures* detected) {
+ wasm::CompilationEnv* env, const wasm::WireBytesStorage* wire_bytes_storage,
+ const wasm::FunctionBody& func_body, int func_index, Counters* counters,
+ wasm::WasmFeatures* detected) {
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.CompileTopTier", "func_index", func_index, "body_size",
func_body.end - func_body.start);
@@ -7939,9 +7906,10 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
call_descriptor = GetI32WasmCallDescriptorForSimd(&zone, call_descriptor);
}
- Pipeline::GenerateCodeForWasmFunction(
- &info, mcgraph, call_descriptor, source_positions, node_origins,
- func_body, env->module, func_index, &loop_infos);
+ Pipeline::GenerateCodeForWasmFunction(&info, env, wire_bytes_storage, mcgraph,
+ call_descriptor, source_positions,
+ node_origins, func_body, env->module,
+ func_index, &loop_infos);
if (counters) {
int zone_bytes =
@@ -7997,10 +7965,9 @@ class LinkageLocationAllocator {
} // namespace
// General code uses the above configuration data.
-CallDescriptor* GetWasmCallDescriptor(
- Zone* zone, const wasm::FunctionSig* fsig,
- WasmGraphBuilder::UseRetpoline use_retpoline, WasmCallKind call_kind,
- bool need_frame_state) {
+CallDescriptor* GetWasmCallDescriptor(Zone* zone, const wasm::FunctionSig* fsig,
+ WasmCallKind call_kind,
+ bool need_frame_state) {
// The extra here is to accommodate the instance object as the first parameter
// and, when specified, the additional callable.
bool extra_callable_param =
@@ -8078,10 +8045,9 @@ CallDescriptor* GetWasmCallDescriptor(
descriptor_kind = CallDescriptor::kCallWasmCapiFunction;
}
- CallDescriptor::Flags flags =
- use_retpoline ? CallDescriptor::kRetpoline
- : need_frame_state ? CallDescriptor::kNeedsFrameState
- : CallDescriptor::kNoFlags;
+ CallDescriptor::Flags flags = need_frame_state
+ ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags;
return zone->New<CallDescriptor>( // --
descriptor_kind, // kind
target_type, // target MachineType
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 71e3111c8c..328152b363 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -53,13 +53,15 @@ using TFNode = compiler::Node;
using TFGraph = compiler::MachineGraph;
class WasmCode;
class WasmFeatures;
+class WireBytesStorage;
enum class LoadTransformationKind : uint8_t;
} // namespace wasm
namespace compiler {
wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
- wasm::CompilationEnv*, const wasm::FunctionBody&, int func_index, Counters*,
+ wasm::CompilationEnv*, const wasm::WireBytesStorage* wire_bytes_storage,
+ const wasm::FunctionBody&, int func_index, Counters*,
wasm::WasmFeatures* detected);
// Calls to Wasm imports are handled in several different ways, depending on the
@@ -176,7 +178,6 @@ class JSWasmCallData {
struct WasmInstanceCacheNodes {
Node* mem_start;
Node* mem_size;
- Node* mem_mask;
};
struct WasmLoopInfo {
@@ -207,10 +208,6 @@ class WasmGraphBuilder {
kNeedsBoundsCheck = true,
kCanOmitBoundsCheck = false
};
- enum UseRetpoline : bool { // --
- kRetpoline = true,
- kNoRetpoline = false
- };
enum CheckForNull : bool { // --
kWithNullCheck = true,
kWithoutNullCheck = false
@@ -474,9 +471,9 @@ class WasmGraphBuilder {
wasm::WasmCodePosition position);
Node* ArrayLen(Node* array_object, CheckForNull null_check,
wasm::WasmCodePosition position);
- void ArrayCopy(Node* dst_array, Node* dst_index, Node* src_array,
- Node* src_index, Node* length,
- wasm::WasmCodePosition position);
+ void ArrayCopy(Node* dst_array, Node* dst_index, CheckForNull dst_null_check,
+ Node* src_array, Node* src_index, CheckForNull src_null_check,
+ Node* length, wasm::WasmCodePosition position);
Node* I31New(Node* input);
Node* I31GetS(Node* input);
Node* I31GetU(Node* input);
@@ -576,12 +573,11 @@ class WasmGraphBuilder {
IsReturnCall continuation);
Node* BuildWasmCall(const wasm::FunctionSig* sig, base::Vector<Node*> args,
base::Vector<Node*> rets, wasm::WasmCodePosition position,
- Node* instance_node, UseRetpoline use_retpoline,
- Node* frame_state = nullptr);
+ Node* instance_node, Node* frame_state = nullptr);
Node* BuildWasmReturnCall(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
wasm::WasmCodePosition position,
- Node* instance_node, UseRetpoline use_retpoline);
+ Node* instance_node);
Node* BuildImportCall(const wasm::FunctionSig* sig, base::Vector<Node*> args,
base::Vector<Node*> rets,
wasm::WasmCodePosition position, int func_index,
@@ -765,7 +761,6 @@ class WasmGraphBuilder {
bool use_js_isolate_and_params() const { return isolate_ != nullptr; }
bool has_simd_ = false;
bool needs_stack_check_ = false;
- const bool untrusted_code_mitigations_ = true;
const wasm::FunctionSig* const sig_;
@@ -791,8 +786,6 @@ V8_EXPORT_PRIVATE void BuildInlinedJSToWasmWrapper(
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
Zone* zone, const wasm::FunctionSig* signature,
- WasmGraphBuilder::UseRetpoline use_retpoline =
- WasmGraphBuilder::kNoRetpoline,
WasmCallKind kind = kWasmFunction, bool need_frame_state = false);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
diff --git a/deps/v8/src/compiler/wasm-inlining.cc b/deps/v8/src/compiler/wasm-inlining.cc
new file mode 100644
index 0000000000..6753769953
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-inlining.cc
@@ -0,0 +1,195 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/wasm-inlining.h"
+
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/wasm-compiler.h"
+#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/graph-builder-interface.h"
+#include "src/wasm/wasm-features.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction WasmInliner::Reduce(Node* node) {
+ if (node->opcode() == IrOpcode::kCall) {
+ return ReduceCall(node);
+ } else {
+ return NoChange();
+ }
+}
+
+// TODO(12166): Abstract over a heuristics provider.
+Reduction WasmInliner::ReduceCall(Node* call) {
+ Node* callee = NodeProperties::GetValueInput(call, 0);
+ IrOpcode::Value reloc_opcode = mcgraph_->machine()->Is32()
+ ? IrOpcode::kRelocatableInt32Constant
+ : IrOpcode::kRelocatableInt64Constant;
+ if (callee->opcode() != reloc_opcode) return NoChange();
+ auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
+ if (static_cast<uint32_t>(info.value()) != inlinee_index_) return NoChange();
+
+ CHECK_LT(inlinee_index_, module()->functions.size());
+ const wasm::WasmFunction* function = &module()->functions[inlinee_index_];
+ base::Vector<const byte> function_bytes =
+ wire_bytes_->GetCode(function->code);
+ const wasm::FunctionBody inlinee_body(function->sig, function->code.offset(),
+ function_bytes.begin(),
+ function_bytes.end());
+ wasm::WasmFeatures detected;
+ WasmGraphBuilder builder(env_, zone(), mcgraph_, inlinee_body.sig, spt_);
+ std::vector<WasmLoopInfo> infos;
+
+ wasm::DecodeResult result;
+ Node* inlinee_start;
+ Node* inlinee_end;
+ {
+ Graph::SubgraphScope scope(graph());
+ result = wasm::BuildTFGraph(zone()->allocator(), env_->enabled_features,
+ module(), &builder, &detected, inlinee_body,
+ &infos, node_origins_, inlinee_index_,
+ wasm::kDoNotInstrumentEndpoints);
+ inlinee_start = graph()->start();
+ inlinee_end = graph()->end();
+ }
+
+ if (result.failed()) return NoChange();
+ return InlineCall(call, inlinee_start, inlinee_end);
+}
+
+// TODO(12166): Handle exceptions and tail calls.
+Reduction WasmInliner::InlineCall(Node* call, Node* callee_start,
+ Node* callee_end) {
+ DCHECK_EQ(call->opcode(), IrOpcode::kCall);
+
+ /* 1) Rewire callee formal parameters to the call-site real parameters. Rewire
+ * effect and control dependencies of callee's start node with the respective
+ * inputs of the call node.
+ */
+ Node* control = NodeProperties::GetControlInput(call);
+ Node* effect = NodeProperties::GetEffectInput(call);
+
+ for (Edge edge : callee_start->use_edges()) {
+ Node* use = edge.from();
+ switch (use->opcode()) {
+ case IrOpcode::kParameter: {
+ // Index 0 is the callee node.
+ int index = 1 + ParameterIndexOf(use->op());
+ Replace(use, NodeProperties::GetValueInput(call, index));
+ break;
+ }
+ default:
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ } else if (NodeProperties::IsControlEdge(edge)) {
+ edge.UpdateTo(control);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ }
+
+ /* 2) Rewire uses of the call node to the return values of the callee. Since
+ * there might be multiple return nodes in the callee, we have to create Merge
+ * and Phi nodes for them.
+ */
+ NodeVector return_nodes(zone());
+ for (Node* const input : callee_end->inputs()) {
+ DCHECK(IrOpcode::IsGraphTerminator(input->opcode()));
+ switch (input->opcode()) {
+ case IrOpcode::kReturn:
+ return_nodes.push_back(input);
+ break;
+ case IrOpcode::kDeoptimize:
+ case IrOpcode::kTerminate:
+ case IrOpcode::kThrow:
+ NodeProperties::MergeControlToEnd(graph(), common(), input);
+ Revisit(graph()->end());
+ break;
+ case IrOpcode::kTailCall:
+ // TODO(12166): A tail call in the inlined function has to be
+ // transformed into a regular call in the caller function.
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ if (return_nodes.size() > 0) {
+ int const return_count = static_cast<int>(return_nodes.size());
+ NodeVector controls(zone());
+ NodeVector effects(zone());
+ for (Node* const return_node : return_nodes) {
+ controls.push_back(NodeProperties::GetControlInput(return_node));
+ effects.push_back(NodeProperties::GetEffectInput(return_node));
+ }
+ Node* control_output = graph()->NewNode(common()->Merge(return_count),
+ return_count, &controls.front());
+ effects.push_back(control_output);
+ Node* effect_output =
+ graph()->NewNode(common()->EffectPhi(return_count),
+ static_cast<int>(effects.size()), &effects.front());
+
+ // The first input of a return node is discarded. This is because Wasm
+ // functions always return an additional 0 constant as a first return value.
+ DCHECK(
+ Int32Matcher(NodeProperties::GetValueInput(return_nodes[0], 0)).Is(0));
+ int const return_arity = return_nodes[0]->op()->ValueInputCount() - 1;
+ NodeVector values(zone());
+ for (int i = 0; i < return_arity; i++) {
+ NodeVector ith_values(zone());
+ for (Node* const return_node : return_nodes) {
+ Node* value = NodeProperties::GetValueInput(return_node, i + 1);
+ ith_values.push_back(value);
+ }
+ ith_values.push_back(control_output);
+ // Find the correct machine representation for the return values from the
+ // inlinee signature.
+ const wasm::WasmFunction* function = &module()->functions[inlinee_index_];
+ MachineRepresentation repr =
+ function->sig->GetReturn(i).machine_representation();
+ Node* ith_value_output = graph()->NewNode(
+ common()->Phi(repr, return_count),
+ static_cast<int>(ith_values.size()), &ith_values.front());
+ values.push_back(ith_value_output);
+ }
+
+ if (return_arity == 0) {
+ // Void function, no value uses.
+ ReplaceWithValue(call, mcgraph()->Dead(), effect_output, control_output);
+ } else if (return_arity == 1) {
+ // One return value. Just replace value uses of the call node with it.
+ ReplaceWithValue(call, values[0], effect_output, control_output);
+ } else {
+ // Multiple returns. We have to find the projections of the call node and
+ // replace them with the returned values.
+ for (Edge use_edge : call->use_edges()) {
+ if (NodeProperties::IsValueEdge(use_edge)) {
+ Node* use = use_edge.from();
+ DCHECK_EQ(use->opcode(), IrOpcode::kProjection);
+ ReplaceWithValue(use, values[ProjectionIndexOf(use->op())]);
+ }
+ }
+ // All value inputs are replaced by the above loop, so it is ok to use
+ // Dead() as a dummy for value replacement.
+ ReplaceWithValue(call, mcgraph()->Dead(), effect_output, control_output);
+ }
+ return Replace(mcgraph()->Dead());
+ } else {
+ // The callee can never return. The call node and all its uses are dead.
+ ReplaceWithValue(call, mcgraph()->Dead(), mcgraph()->Dead(),
+ mcgraph()->Dead());
+ return Changed(call);
+ }
+}
+
+const wasm::WasmModule* WasmInliner::module() const { return env_->module; }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/wasm-inlining.h b/deps/v8/src/compiler/wasm-inlining.h
new file mode 100644
index 0000000000..8b31b6b291
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-inlining.h
@@ -0,0 +1,77 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
+#ifndef V8_COMPILER_WASM_INLINING_H_
+#define V8_COMPILER_WASM_INLINING_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+
+namespace wasm {
+struct CompilationEnv;
+struct WasmModule;
+class WireBytesStorage;
+} // namespace wasm
+
+class BytecodeOffset;
+class OptimizedCompilationInfo;
+
+namespace compiler {
+
+class NodeOriginTable;
+class SourcePositionTable;
+
+// The WasmInliner provides the core graph inlining machinery for WebAssembly
+// graphs. Note that this class only deals with the mechanics of how to inline
+// one graph into another; heuristics that decide what and how much to inline
+// are beyond its scope. As a current placeholder, only the function at the
+// given index {inlinee_index} is inlined (a wiring sketch follows this header).
+class WasmInliner final : public AdvancedReducer {
+ public:
+ WasmInliner(Editor* editor, wasm::CompilationEnv* env,
+ SourcePositionTable* spt, NodeOriginTable* node_origins,
+ MachineGraph* mcgraph, const wasm::WireBytesStorage* wire_bytes,
+ uint32_t inlinee_index)
+ : AdvancedReducer(editor),
+ env_(env),
+ spt_(spt),
+ node_origins_(node_origins),
+ mcgraph_(mcgraph),
+ wire_bytes_(wire_bytes),
+ inlinee_index_(inlinee_index) {}
+
+ const char* reducer_name() const override { return "WasmInliner"; }
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Zone* zone() const { return mcgraph_->zone(); }
+ CommonOperatorBuilder* common() const { return mcgraph_->common(); }
+ Graph* graph() const { return mcgraph_->graph(); }
+ MachineGraph* mcgraph() const { return mcgraph_; }
+ const wasm::WasmModule* module() const;
+
+ Reduction ReduceCall(Node* call);
+ Reduction InlineCall(Node* call, Node* callee_start, Node* callee_end);
+
+ wasm::CompilationEnv* const env_;
+ SourcePositionTable* const spt_;
+ NodeOriginTable* const node_origins_;
+ MachineGraph* const mcgraph_;
+ const wasm::WireBytesStorage* const wire_bytes_;
+ const uint32_t inlinee_index_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_WASM_INLINING_H_
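As the class comment above notes, the inliner is only the mechanism. A hedged wiring sketch, assuming the surrounding pipeline already owns a GraphReducer (which serves as the AdvancedReducer::Editor) together with the objects taken by the constructor declared in this header; this is an illustration of the intended usage, not code from the patch.

#include "src/compiler/graph-reducer.h"
#include "src/compiler/wasm-inlining.h"

namespace v8 {
namespace internal {
namespace compiler {

// Hypothetical helper, for illustration only. All arguments are assumed to be
// produced by the surrounding compilation pipeline.
void RunWasmInliningSketch(GraphReducer* graph_reducer,
                           wasm::CompilationEnv* env, SourcePositionTable* spt,
                           NodeOriginTable* node_origins, MachineGraph* mcgraph,
                           const wasm::WireBytesStorage* wire_bytes,
                           uint32_t inlinee_index) {
  // The GraphReducer doubles as the AdvancedReducer::Editor for the inliner.
  WasmInliner inliner(graph_reducer, env, spt, node_origins, mcgraph,
                      wire_bytes, inlinee_index);
  graph_reducer->AddReducer(&inliner);
  // Runs reductions; WasmInliner::Reduce fires for each kCall node it visits.
  graph_reducer->ReduceGraph();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8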
diff --git a/deps/v8/src/d8/async-hooks-wrapper.cc b/deps/v8/src/d8/async-hooks-wrapper.cc
index 84191b9815..13b67ce8ea 100644
--- a/deps/v8/src/d8/async-hooks-wrapper.cc
+++ b/deps/v8/src/d8/async-hooks-wrapper.cc
@@ -3,6 +3,11 @@
// found in the LICENSE file.
#include "src/d8/async-hooks-wrapper.h"
+
+#include "include/v8-function.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-template.h"
#include "src/d8/d8.h"
#include "src/execution/isolate-inl.h"
@@ -120,66 +125,74 @@ Local<Object> AsyncHooks::CreateHook(
void AsyncHooks::ShellPromiseHook(PromiseHookType type, Local<Promise> promise,
Local<Value> parent) {
- AsyncHooks* hooks =
- PerIsolateData::Get(promise->GetIsolate())->GetAsyncHooks();
-
- HandleScope handle_scope(hooks->isolate_);
-
- Local<Context> currentContext = hooks->isolate_->GetCurrentContext();
- DCHECK(!currentContext.IsEmpty());
+ v8::Isolate* isolate = promise->GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- if (type == PromiseHookType::kInit) {
- ++hooks->current_async_id;
- Local<Integer> async_id =
- Integer::New(hooks->isolate_, hooks->current_async_id);
-
- CHECK(!promise
- ->HasPrivate(currentContext,
- hooks->async_id_smb.Get(hooks->isolate_))
+ AsyncHooks* hooks = PerIsolateData::Get(isolate)->GetAsyncHooks();
+ HandleScope handle_scope(isolate);
+ // Temporarily clear any scheduled_exception to allow evaluating JS that can
+ // throw.
+ i::Handle<i::Object> scheduled_exception;
+ if (i_isolate->has_scheduled_exception()) {
+ scheduled_exception = handle(i_isolate->scheduled_exception(), i_isolate);
+ i_isolate->clear_scheduled_exception();
+ }
+ {
+ TryCatch try_catch(isolate);
+ try_catch.SetVerbose(true);
+
+ Local<Context> currentContext = isolate->GetCurrentContext();
+ DCHECK(!currentContext.IsEmpty());
+
+ if (type == PromiseHookType::kInit) {
+ ++hooks->current_async_id;
+ Local<Integer> async_id = Integer::New(isolate, hooks->current_async_id);
+ CHECK(
+ !promise->HasPrivate(currentContext, hooks->async_id_smb.Get(isolate))
.ToChecked());
- promise->SetPrivate(currentContext,
- hooks->async_id_smb.Get(hooks->isolate_), async_id);
-
- if (parent->IsPromise()) {
- Local<Promise> parent_promise = parent.As<Promise>();
- Local<Value> parent_async_id =
- parent_promise
- ->GetPrivate(hooks->isolate_->GetCurrentContext(),
- hooks->async_id_smb.Get(hooks->isolate_))
- .ToLocalChecked();
- promise->SetPrivate(currentContext,
- hooks->trigger_id_smb.Get(hooks->isolate_),
- parent_async_id);
- } else {
- CHECK(parent->IsUndefined());
- Local<Integer> trigger_id = Integer::New(hooks->isolate_, 0);
- promise->SetPrivate(currentContext,
- hooks->trigger_id_smb.Get(hooks->isolate_),
- trigger_id);
+ promise->SetPrivate(currentContext, hooks->async_id_smb.Get(isolate),
+ async_id);
+
+ if (parent->IsPromise()) {
+ Local<Promise> parent_promise = parent.As<Promise>();
+ Local<Value> parent_async_id =
+ parent_promise
+ ->GetPrivate(currentContext, hooks->async_id_smb.Get(isolate))
+ .ToLocalChecked();
+ promise->SetPrivate(currentContext, hooks->trigger_id_smb.Get(isolate),
+ parent_async_id);
+ } else {
+ CHECK(parent->IsUndefined());
+ promise->SetPrivate(currentContext, hooks->trigger_id_smb.Get(isolate),
+ Integer::New(isolate, 0));
+ }
+ } else if (type == PromiseHookType::kBefore) {
+ AsyncContext ctx;
+ ctx.execution_async_id =
+ promise->GetPrivate(currentContext, hooks->async_id_smb.Get(isolate))
+ .ToLocalChecked()
+ .As<Integer>()
+ ->Value();
+ ctx.trigger_async_id =
+ promise
+ ->GetPrivate(currentContext, hooks->trigger_id_smb.Get(isolate))
+ .ToLocalChecked()
+ .As<Integer>()
+ ->Value();
+ hooks->asyncContexts.push(ctx);
+ } else if (type == PromiseHookType::kAfter) {
+ hooks->asyncContexts.pop();
+ }
+ if (!i::StackLimitCheck{i_isolate}.HasOverflowed()) {
+ for (AsyncHooksWrap* wrap : hooks->async_wraps_) {
+ PromiseHookDispatch(type, promise, parent, wrap, hooks);
+ if (try_catch.HasCaught()) break;
+ }
+ if (try_catch.HasCaught()) Shell::ReportException(isolate, &try_catch);
}
- } else if (type == PromiseHookType::kBefore) {
- AsyncContext ctx;
- ctx.execution_async_id =
- promise
- ->GetPrivate(hooks->isolate_->GetCurrentContext(),
- hooks->async_id_smb.Get(hooks->isolate_))
- .ToLocalChecked()
- .As<Integer>()
- ->Value();
- ctx.trigger_async_id =
- promise
- ->GetPrivate(hooks->isolate_->GetCurrentContext(),
- hooks->trigger_id_smb.Get(hooks->isolate_))
- .ToLocalChecked()
- .As<Integer>()
- ->Value();
- hooks->asyncContexts.push(ctx);
- } else if (type == PromiseHookType::kAfter) {
- hooks->asyncContexts.pop();
}
-
- for (AsyncHooksWrap* wrap : hooks->async_wraps_) {
- PromiseHookDispatch(type, promise, parent, wrap, hooks);
+ if (!scheduled_exception.is_null()) {
+ i_isolate->set_scheduled_exception(*scheduled_exception);
}
}
@@ -215,28 +228,14 @@ void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
Local<Promise> promise,
Local<Value> parent, AsyncHooksWrap* wrap,
AsyncHooks* hooks) {
- if (!wrap->IsEnabled()) {
- return;
- }
+ if (!wrap->IsEnabled()) return;
+ v8::Isolate* v8_isolate = hooks->isolate_;
+ HandleScope handle_scope(v8_isolate);
- HandleScope handle_scope(hooks->isolate_);
-
- TryCatch try_catch(hooks->isolate_);
- try_catch.SetVerbose(true);
-
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(hooks->isolate_);
- if (isolate->has_scheduled_exception()) {
- isolate->ScheduleThrow(isolate->scheduled_exception());
-
- DCHECK(try_catch.HasCaught());
- Shell::ReportException(hooks->isolate_, &try_catch);
- return;
- }
-
- Local<Value> rcv = Undefined(hooks->isolate_);
- Local<Context> context = hooks->isolate_->GetCurrentContext();
+ Local<Value> rcv = Undefined(v8_isolate);
+ Local<Context> context = v8_isolate->GetCurrentContext();
Local<Value> async_id =
- promise->GetPrivate(context, hooks->async_id_smb.Get(hooks->isolate_))
+ promise->GetPrivate(context, hooks->async_id_smb.Get(v8_isolate))
.ToLocalChecked();
Local<Value> args[1] = {async_id};
@@ -245,28 +244,31 @@ void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
MaybeLocal<Value> result;
// Sacrifice brevity for readability and debuggability
- if (type == PromiseHookType::kInit) {
- if (!wrap->init_function().IsEmpty()) {
- Local<Value> initArgs[4] = {
- async_id, String::NewFromUtf8Literal(hooks->isolate_, "PROMISE"),
- promise
- ->GetPrivate(context, hooks->trigger_id_smb.Get(hooks->isolate_))
- .ToLocalChecked(),
- promise};
- result = wrap->init_function()->Call(context, rcv, 4, initArgs);
- }
- } else if (type == PromiseHookType::kBefore) {
- if (!wrap->before_function().IsEmpty()) {
- result = wrap->before_function()->Call(context, rcv, 1, args);
- }
- } else if (type == PromiseHookType::kAfter) {
- if (!wrap->after_function().IsEmpty()) {
- result = wrap->after_function()->Call(context, rcv, 1, args);
- }
- } else if (type == PromiseHookType::kResolve) {
- if (!wrap->promiseResolve_function().IsEmpty()) {
- result = wrap->promiseResolve_function()->Call(context, rcv, 1, args);
- }
+ switch (type) {
+ case PromiseHookType::kInit:
+ if (!wrap->init_function().IsEmpty()) {
+ Local<Value> initArgs[4] = {
+ async_id, String::NewFromUtf8Literal(v8_isolate, "PROMISE"),
+ promise->GetPrivate(context, hooks->trigger_id_smb.Get(v8_isolate))
+ .ToLocalChecked(),
+ promise};
+ result = wrap->init_function()->Call(context, rcv, 4, initArgs);
+ }
+ break;
+ case PromiseHookType::kBefore:
+ if (!wrap->before_function().IsEmpty()) {
+ result = wrap->before_function()->Call(context, rcv, 1, args);
+ }
+ break;
+ case PromiseHookType::kAfter:
+ if (!wrap->after_function().IsEmpty()) {
+ result = wrap->after_function()->Call(context, rcv, 1, args);
+ }
+ break;
+ case PromiseHookType::kResolve:
+ if (!wrap->promiseResolve_function().IsEmpty()) {
+ result = wrap->promiseResolve_function()->Call(context, rcv, 1, args);
+ }
}
}
diff --git a/deps/v8/src/d8/async-hooks-wrapper.h b/deps/v8/src/d8/async-hooks-wrapper.h
index f339b6e316..23cc0be9c0 100644
--- a/deps/v8/src/d8/async-hooks-wrapper.h
+++ b/deps/v8/src/d8/async-hooks-wrapper.h
@@ -7,11 +7,18 @@
#include <stack>
-#include "include/v8.h"
+#include "include/v8-function-callback.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-promise.h"
#include "src/objects/objects.h"
namespace v8 {
+class Function;
+class Isolate;
+class ObjectTemplate;
+class Value;
+
using async_id_t = double;
struct AsyncContext {
diff --git a/deps/v8/src/d8/d8-platforms.cc b/deps/v8/src/d8/d8-platforms.cc
index 722b2bc4e2..cd48a35bbd 100644
--- a/deps/v8/src/d8/d8-platforms.cc
+++ b/deps/v8/src/d8/d8-platforms.cc
@@ -82,7 +82,14 @@ class PredictablePlatform final : public Platform {
}
double MonotonicallyIncreasingTime() override {
- return synthetic_time_in_sec_ += 0.00001;
+ // In predictable mode, there should be no (observable) concurrency, but we
+ // still run some tests that explicitly specify '--predictable' in the
+ // '--isolates' variant, where several threads run the same test in
+ // different isolates. To avoid TSan issues in that scenario we use atomic
+ // increments here.
+ uint64_t synthetic_time =
+ synthetic_time_.fetch_add(1, std::memory_order_relaxed);
+ return 1e-5 * synthetic_time;
}
double CurrentClockTimeMillis() override {
@@ -96,7 +103,7 @@ class PredictablePlatform final : public Platform {
Platform* platform() const { return platform_.get(); }
private:
- double synthetic_time_in_sec_ = 0.0;
+ std::atomic<uint64_t> synthetic_time_{0};
std::unique_ptr<Platform> platform_;
};
diff --git a/deps/v8/src/d8/d8-posix.cc b/deps/v8/src/d8/d8-posix.cc
index 05e475f538..8a031ccdc0 100644
--- a/deps/v8/src/d8/d8-posix.cc
+++ b/deps/v8/src/d8/d8-posix.cc
@@ -16,6 +16,8 @@
#include <sys/wait.h>
#include <unistd.h>
+#include "include/v8-container.h"
+#include "include/v8-template.h"
#include "src/base/platform/wrappers.h"
#include "src/d8/d8.h"
diff --git a/deps/v8/src/d8/d8-test.cc b/deps/v8/src/d8/d8-test.cc
index 635a1f4514..6202c397ec 100644
--- a/deps/v8/src/d8/d8-test.cc
+++ b/deps/v8/src/d8/d8-test.cc
@@ -5,6 +5,7 @@
#include "src/d8/d8.h"
#include "include/v8-fast-api-calls.h"
+#include "include/v8-template.h"
#include "src/api/api-inl.h"
// This file exposes a d8.test.fast_c_api object, which adds testing facility
@@ -94,10 +95,10 @@ class FastCApiObject {
#ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
typedef double Type;
- static constexpr CTypeInfo type_info = CTypeInfo(CTypeInfo::Type::kFloat64);
+#define type_info kTypeInfoFloat64
#else
typedef int32_t Type;
- static constexpr CTypeInfo type_info = CTypeInfo(CTypeInfo::Type::kInt32);
+#define type_info kTypeInfoInt32
#endif // V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
static Type AddAllSequenceFastCallback(Local<Object> receiver,
bool should_fallback,
@@ -630,16 +631,19 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
SideEffectType::kHasSideEffect, &is_valid_api_object_c_func));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "fast_call_count",
- FunctionTemplate::New(isolate, FastCApiObject::FastCallCount,
- Local<Value>(), signature));
+ FunctionTemplate::New(
+ isolate, FastCApiObject::FastCallCount, Local<Value>(), signature,
+ 1, ConstructorBehavior::kThrow, SideEffectType::kHasNoSideEffect));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "slow_call_count",
- FunctionTemplate::New(isolate, FastCApiObject::SlowCallCount,
- Local<Value>(), signature));
+ FunctionTemplate::New(
+ isolate, FastCApiObject::SlowCallCount, Local<Value>(), signature,
+ 1, ConstructorBehavior::kThrow, SideEffectType::kHasNoSideEffect));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "reset_counts",
FunctionTemplate::New(isolate, FastCApiObject::ResetCounts,
- Local<Value>(), signature));
+ Local<Value>(), signature, 1,
+ ConstructorBehavior::kThrow));
}
api_obj_ctor->InstanceTemplate()->SetInternalFieldCount(
FastCApiObject::kV8WrapperObjectIndex + 1);
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index 2b831bc747..6d35be77b8 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -24,8 +24,12 @@
#include "include/libplatform/libplatform.h"
#include "include/libplatform/v8-tracing.h"
+#include "include/v8-function.h"
+#include "include/v8-initialization.h"
#include "include/v8-inspector.h"
+#include "include/v8-json.h"
#include "include/v8-profiler.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/base/cpu.h"
#include "src/base/logging.h"
@@ -166,7 +170,11 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
void* AllocateVM(size_t length) {
DCHECK_LE(kVMThreshold, length);
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ v8::PageAllocator* page_allocator = i::GetPlatformDataCagePageAllocator();
+#else
v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
+#endif
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
return i::AllocatePages(page_allocator, nullptr, allocated, page_size,
@@ -174,7 +182,11 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
void FreeVM(void* data, size_t length) {
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ v8::PageAllocator* page_allocator = i::GetPlatformDataCagePageAllocator();
+#else
v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
+#endif
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
CHECK(i::FreePages(page_allocator, data, allocated));
@@ -236,7 +248,7 @@ class MockArrayBufferAllocatiorWithLimit : public MockArrayBufferAllocator {
std::atomic<size_t> space_left_;
};
-#ifdef V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
// This is a mock allocator variant that provides a huge virtual allocation
// backed by a small real allocation that is repeatedly mapped. If you create an
@@ -329,7 +341,7 @@ class MultiMappedAllocator : public ArrayBufferAllocatorBase {
base::Mutex regions_mutex_;
};
-#endif // V8_OS_LINUX
+#endif // MULTI_MAPPED_ALLOCATOR_AVAILABLE
v8::Platform* g_default_platform;
std::unique_ptr<v8::Platform> g_platform;
@@ -846,16 +858,21 @@ std::string NormalizePath(const std::string& path,
std::string segment;
while (std::getline(segment_stream, segment, '/')) {
if (segment == "..") {
- segments.pop_back();
+ if (!segments.empty()) segments.pop_back();
} else if (segment != ".") {
segments.push_back(segment);
}
}
// Join path segments.
std::ostringstream os;
- std::copy(segments.begin(), segments.end() - 1,
- std::ostream_iterator<std::string>(os, "/"));
- os << *segments.rbegin();
+ if (segments.size() > 1) {
+ std::copy(segments.begin(), segments.end() - 1,
+ std::ostream_iterator<std::string>(os, "/"));
+ os << *segments.rbegin();
+ } else {
+ os << "/";
+ if (!segments.empty()) os << segments[0];
+ }
return os.str();
}
@@ -1995,8 +2012,14 @@ void Shell::TestVerifySourcePositions(
auto callable = i::Handle<i::JSFunctionOrBoundFunction>::cast(arg_handle);
while (callable->IsJSBoundFunction()) {
+ internal::DisallowGarbageCollection no_gc;
auto bound_function = i::Handle<i::JSBoundFunction>::cast(callable);
auto bound_target = bound_function->bound_target_function();
+ if (!bound_target.IsJSFunctionOrBoundFunction()) {
+ internal::AllowGarbageCollection allow_gc;
+ isolate->ThrowError("Expected function as bound target.");
+ return;
+ }
callable =
handle(i::JSFunctionOrBoundFunction::cast(bound_target), i_isolate);
}
@@ -2009,7 +2032,7 @@ void Shell::TestVerifySourcePositions(
i::Handle<i::BytecodeArray> bytecodes =
handle(function->shared().GetBytecodeArray(i_isolate), i_isolate);
i::interpreter::BytecodeArrayIterator bytecode_iterator(bytecodes);
- bool has_baseline = function->shared().HasBaselineData();
+ bool has_baseline = function->shared().HasBaselineCode();
i::Handle<i::ByteArray> bytecode_offsets;
std::unique_ptr<i::baseline::BytecodeOffsetIterator> offset_iterator;
if (has_baseline) {
@@ -2990,7 +3013,7 @@ Local<ObjectTemplate> Shell::CreateD8Template(Isolate* isolate) {
// Correctness fuzzing will attempt to compare results of tests with and
// without turbo_fast_api_calls, so we don't expose the fast_c_api
// constructor when --correctness_fuzzer_suppressions is on.
- if (i::FLAG_turbo_fast_api_calls &&
+ if (options.expose_fast_api && i::FLAG_turbo_fast_api_calls &&
!i::FLAG_correctness_fuzzer_suppressions) {
test_template->Set(isolate, "FastCAPI",
Shell::CreateTestFastCApiTemplate(isolate));
@@ -3166,13 +3189,15 @@ void Shell::WriteIgnitionDispatchCountersFile(v8::Isolate* isolate) {
Local<Context> context = Context::New(isolate);
Context::Scope context_scope(context);
- Local<Object> dispatch_counters = reinterpret_cast<i::Isolate*>(isolate)
- ->interpreter()
- ->GetDispatchCountersObject();
+ i::Handle<i::JSObject> dispatch_counters =
+ reinterpret_cast<i::Isolate*>(isolate)
+ ->interpreter()
+ ->GetDispatchCountersObject();
std::ofstream dispatch_counters_stream(
i::FLAG_trace_ignition_dispatches_output_file);
dispatch_counters_stream << *String::Utf8Value(
- isolate, JSON::Stringify(context, dispatch_counters).ToLocalChecked());
+ isolate, JSON::Stringify(context, Utils::ToLocal(dispatch_counters))
+ .ToLocalChecked());
}
namespace {
@@ -3491,15 +3516,9 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
isolate->ThrowError("Error reading file");
return;
}
- std::unique_ptr<v8::BackingStore> backing_store =
- ArrayBuffer::NewBackingStore(
- data, length,
- [](void* data, size_t length, void*) {
- delete[] reinterpret_cast<uint8_t*>(data);
- },
- nullptr);
- Local<v8::ArrayBuffer> buffer =
- ArrayBuffer::New(isolate, std::move(backing_store));
+ Local<v8::ArrayBuffer> buffer = ArrayBuffer::New(isolate, length);
+ memcpy(buffer->GetBackingStore()->Data(), data, length);
+ delete[] data;
args.GetReturnValue().Set(buffer);
}
@@ -4252,6 +4271,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--throws") == 0) {
options.expected_to_throw = true;
argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--no-fail") == 0) {
+ options.no_fail = true;
+ argv[i] = nullptr;
} else if (strncmp(argv[i], "--icu-data-file=", 16) == 0) {
options.icu_data_file = argv[i] + 16;
argv[i] = nullptr;
@@ -4357,8 +4379,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.fuzzilli_coverage_statistics = true;
argv[i] = nullptr;
#endif
- } else if (strcmp(argv[i], "--fuzzy-module-file-extensions") == 0) {
- options.fuzzy_module_file_extensions = true;
+ } else if (strcmp(argv[i], "--no-fuzzy-module-file-extensions") == 0) {
+ DCHECK(options.fuzzy_module_file_extensions);
+ options.fuzzy_module_file_extensions = false;
argv[i] = nullptr;
#if defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
} else if (strcmp(argv[i], "--enable-system-instrumentation") == 0) {
@@ -4381,6 +4404,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.wasm_trap_handler = false;
argv[i] = nullptr;
#endif // V8_ENABLE_WEBASSEMBLY
+ } else if (strcmp(argv[i], "--expose-fast-api") == 0) {
+ options.expose_fast_api = true;
+ argv[i] = nullptr;
}
}
@@ -4404,10 +4430,15 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.mock_arraybuffer_allocator = i::FLAG_mock_arraybuffer_allocator;
options.mock_arraybuffer_allocator_limit =
i::FLAG_mock_arraybuffer_allocator_limit;
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
options.multi_mapped_mock_allocator = i::FLAG_multi_mapped_mock_allocator;
#endif
+ if (i::FLAG_stress_snapshot && options.expose_fast_api &&
+ check_d8_flag_contradictions) {
+ FATAL("Flag --expose-fast-api is incompatible with --stress-snapshot.");
+ }
+
// Set up isolated source groups.
options.isolate_sources = new SourceGroup[options.num_isolates];
SourceGroup* current = options.isolate_sources;
@@ -4501,7 +4532,8 @@ int Shell::RunMain(Isolate* isolate, bool last_run) {
Shell::unhandled_promise_rejections_.store(0);
}
// In order to finish successfully, success must be != expected_to_throw.
- return success == Shell::options.expected_to_throw ? 1 : 0;
+ if (Shell::options.no_fail) return 0;
+ return (success == Shell::options.expected_to_throw ? 1 : 0);
}
void Shell::CollectGarbage(Isolate* isolate) {
@@ -5019,7 +5051,7 @@ int Shell::Main(int argc, char* argv[]) {
options.thread_pool_size, v8::platform::IdleTaskSupport::kEnabled,
in_process_stack_dumping, std::move(tracing));
g_default_platform = g_platform.get();
- if (i::FLAG_verify_predictable) {
+ if (i::FLAG_predictable) {
g_platform = MakePredictablePlatform(std::move(g_platform));
}
if (options.stress_delay_tasks) {
@@ -5037,6 +5069,11 @@ int Shell::Main(int argc, char* argv[]) {
V8::SetFlagsFromString("--redirect-code-traces-to=code.asm");
}
v8::V8::InitializePlatform(g_platform.get());
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!v8::V8::InitializeVirtualMemoryCage()) {
+ FATAL("Could not initialize the virtual memory cage");
+ }
+#endif
v8::V8::Initialize();
if (options.snapshot_blob) {
v8::V8::InitializeExternalStartupDataFromFile(options.snapshot_blob);
@@ -5053,19 +5090,19 @@ int Shell::Main(int argc, char* argv[]) {
memory_limit >= options.mock_arraybuffer_allocator_limit
? memory_limit
: std::numeric_limits<size_t>::max());
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
MultiMappedAllocator multi_mapped_mock_allocator;
-#endif // V8_OS_LINUX
+#endif
if (options.mock_arraybuffer_allocator) {
if (memory_limit) {
Shell::array_buffer_allocator = &mock_arraybuffer_allocator_with_limit;
} else {
Shell::array_buffer_allocator = &mock_arraybuffer_allocator;
}
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
} else if (options.multi_mapped_mock_allocator) {
Shell::array_buffer_allocator = &multi_mapped_mock_allocator;
-#endif // V8_OS_LINUX
+#endif
} else {
Shell::array_buffer_allocator = &shell_array_buffer_allocator;
}
diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h
index 9d3cc4f6d2..77b3ca6679 100644
--- a/deps/v8/src/d8/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -14,6 +14,9 @@
#include <unordered_set>
#include <vector>
+#include "include/v8-array-buffer.h"
+#include "include/v8-isolate.h"
+#include "include/v8-script.h"
#include "src/base/once.h"
#include "src/base/platform/time.h"
#include "src/base/platform/wrappers.h"
@@ -24,7 +27,11 @@
namespace v8 {
+class BackingStore;
+class CompiledWasmModule;
class D8Console;
+class Message;
+class TryCatch;
enum class ModuleType { kJavaScript, kJSON, kInvalid };
@@ -385,14 +392,17 @@ class ShellOptions {
DisallowReassignment<bool> interactive_shell = {"shell", false};
bool test_shell = false;
DisallowReassignment<bool> expected_to_throw = {"throws", false};
+ DisallowReassignment<bool> no_fail = {"no-fail", false};
DisallowReassignment<bool> ignore_unhandled_promises = {
"ignore-unhandled-promises", false};
DisallowReassignment<bool> mock_arraybuffer_allocator = {
"mock-arraybuffer-allocator", false};
DisallowReassignment<size_t> mock_arraybuffer_allocator_limit = {
"mock-arraybuffer-allocator-limit", 0};
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
DisallowReassignment<bool> multi_mapped_mock_allocator = {
"multi-mapped-mock-allocator", false};
+#endif
DisallowReassignment<bool> enable_inspector = {"enable-inspector", false};
int num_isolates = 1;
DisallowReassignment<v8::ScriptCompiler::CompileOptions, true>
@@ -433,6 +443,7 @@ class ShellOptions {
#if V8_ENABLE_WEBASSEMBLY
DisallowReassignment<bool> wasm_trap_handler = {"wasm-trap-handler", true};
#endif // V8_ENABLE_WEBASSEMBLY
+ DisallowReassignment<bool> expose_fast_api = {"expose-fast-api", false};
};
class Shell : public i::AllStatic {
diff --git a/deps/v8/src/date/date.cc b/deps/v8/src/date/date.cc
index 250539e24c..9b0665aba0 100644
--- a/deps/v8/src/date/date.cc
+++ b/deps/v8/src/date/date.cc
@@ -455,5 +455,83 @@ DateCache::DST* DateCache::LeastRecentlyUsedDST(DST* skip) {
return result;
}
+namespace {
+
+// ES6 section 20.3.1.1 Time Values and Time Range
+const double kMinYear = -1000000.0;
+const double kMaxYear = -kMinYear;
+const double kMinMonth = -10000000.0;
+const double kMaxMonth = -kMinMonth;
+
+const double kMsPerDay = 86400000.0;
+
+const double kMsPerSecond = 1000.0;
+const double kMsPerMinute = 60000.0;
+const double kMsPerHour = 3600000.0;
+
+} // namespace
+
+double MakeDate(double day, double time) {
+ if (std::isfinite(day) && std::isfinite(time)) {
+ return time + day * kMsPerDay;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+double MakeDay(double year, double month, double date) {
+ if ((kMinYear <= year && year <= kMaxYear) &&
+ (kMinMonth <= month && month <= kMaxMonth) && std::isfinite(date)) {
+ int y = FastD2I(year);
+ int m = FastD2I(month);
+ y += m / 12;
+ m %= 12;
+ if (m < 0) {
+ m += 12;
+ y -= 1;
+ }
+ DCHECK_LE(0, m);
+ DCHECK_LT(m, 12);
+
+ // kYearDelta is an arbitrary number such that:
+ // a) kYearDelta = -1 (mod 400)
+ // b) year + kYearDelta > 0 for years in the range defined by
+ //    ECMA 262 - 15.9.1.1, i.e. up to 100,000,000 days on either side of
+ // Jan 1 1970. This is required so that we don't run into integer
+ // division of negative numbers.
+ // c) there shouldn't be an overflow for 32-bit integers in the following
+ // operations.
+ static const int kYearDelta = 399999;
+ static const int kBaseDay =
+ 365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
+ (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
+ int day_from_year = 365 * (y + kYearDelta) + (y + kYearDelta) / 4 -
+ (y + kYearDelta) / 100 + (y + kYearDelta) / 400 -
+ kBaseDay;
+ if ((y % 4 != 0) || (y % 100 == 0 && y % 400 != 0)) {
+ static const int kDayFromMonth[] = {0, 31, 59, 90, 120, 151,
+ 181, 212, 243, 273, 304, 334};
+ day_from_year += kDayFromMonth[m];
+ } else {
+ static const int kDayFromMonth[] = {0, 31, 60, 91, 121, 152,
+ 182, 213, 244, 274, 305, 335};
+ day_from_year += kDayFromMonth[m];
+ }
+ return static_cast<double>(day_from_year - 1) + DoubleToInteger(date);
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+double MakeTime(double hour, double min, double sec, double ms) {
+ if (std::isfinite(hour) && std::isfinite(min) && std::isfinite(sec) &&
+ std::isfinite(ms)) {
+ double const h = DoubleToInteger(hour);
+ double const m = DoubleToInteger(min);
+ double const s = DoubleToInteger(sec);
+ double const milli = DoubleToInteger(ms);
+ return h * kMsPerHour + m * kMsPerMinute + s * kMsPerSecond + milli;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
} // namespace internal
} // namespace v8
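The MakeDay/MakeTime/MakeDate helpers added above are pure arithmetic, so a quick standalone sanity check is easy to state: day 0 composed with a zero time is the epoch, and one hour is 3,600,000 ms. A minimal sketch, assuming the declarations added to src/date/date.h below are visible; it is illustrative, not a test from this patch.

#include <cassert>

#include "src/date/date.h"

// Illustrative only: values follow directly from the definitions above.
void CheckEpochComposition() {
  using v8::internal::MakeDate;
  using v8::internal::MakeDay;
  using v8::internal::MakeTime;
  double day = MakeDay(1970, 0, 1);         // Jan 1 1970 -> day 0
  double time = MakeTime(0, 0, 0, 0);       // midnight -> 0 ms
  assert(MakeDate(day, time) == 0);         // the epoch, 0 ms
  assert(MakeTime(1, 0, 0, 0) == 3600000);  // one hour in ms
}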
diff --git a/deps/v8/src/date/date.h b/deps/v8/src/date/date.h
index 1f6c79c5d4..734ab3a26f 100644
--- a/deps/v8/src/date/date.h
+++ b/deps/v8/src/date/date.h
@@ -236,6 +236,17 @@ class V8_EXPORT_PRIVATE DateCache {
base::TimezoneCache* tz_cache_;
};
+// Routines shared between Date and Temporal
+
+// ES6 section 20.3.1.14 MakeDate (day, time)
+double MakeDate(double day, double time);
+
+// ES6 section 20.3.1.13 MakeDay (year, month, date)
+double MakeDay(double year, double month, double date);
+
+// ES6 section 20.3.1.12 MakeTime (hour, min, sec, ms)
+double MakeTime(double hour, double min, double sec, double ms);
+
} // namespace internal
} // namespace v8
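
The three helpers compose in the usual ES6 fashion: MakeDay produces a day number relative to the epoch (0-based months, with out-of-range months folded into the year), MakeTime produces milliseconds within a day, and MakeDate combines the two; any non-finite input yields NaN. A minimal illustration of the arithmetic, derived from the formulas above rather than from any V8 test:

    // Illustrative composition of the shared Date/Temporal helpers.
    double day  = MakeDay(1970, 0, 2);    // 1.0: one day after the epoch
    double time = MakeTime(0, 0, 10, 0);  // 10000.0 ms into that day
    double ms   = MakeDate(day, time);    // 86410000.0 ms since 1970-01-01T00:00:00Z
    // Month normalization: MakeDay(2021, 12, 1) designates 2022-01-01, since
    // the quotient of m / 12 is carried into the year before the table lookup.
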
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index cecf46d7b7..5940e2dd02 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -34,9 +34,8 @@ static MaybeHandle<SharedFunctionInfo> GetFunctionInfo(Isolate* isolate,
ScriptOriginOptions(false, true));
script_details.repl_mode = repl_mode;
return Compiler::GetSharedFunctionInfoForScript(
- isolate, source, script_details, nullptr, nullptr,
- ScriptCompiler::kNoCompileOptions, ScriptCompiler::kNoCacheNoReason,
- NOT_NATIVES_CODE);
+ isolate, source, script_details, ScriptCompiler::kNoCompileOptions,
+ ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE);
}
} // namespace
@@ -391,8 +390,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
/* Test */ \
V(GetOptimizationStatus) \
V(OptimizeFunctionOnNextCall) \
- V(OptimizeOsr) \
- V(UnblockConcurrentRecompilation)
+ V(OptimizeOsr)
// Intrinsics with inline versions have to be allowlisted here a second time.
#define INLINE_INTRINSIC_ALLOWLIST(V) \
@@ -1061,6 +1059,14 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtin caller,
case Builtin::kTSANRelaxedStore32SaveFP:
case Builtin::kTSANRelaxedStore64IgnoreFP:
case Builtin::kTSANRelaxedStore64SaveFP:
+ case Builtin::kTSANSeqCstStore8IgnoreFP:
+ case Builtin::kTSANSeqCstStore8SaveFP:
+ case Builtin::kTSANSeqCstStore16IgnoreFP:
+ case Builtin::kTSANSeqCstStore16SaveFP:
+ case Builtin::kTSANSeqCstStore32IgnoreFP:
+ case Builtin::kTSANSeqCstStore32SaveFP:
+ case Builtin::kTSANSeqCstStore64IgnoreFP:
+ case Builtin::kTSANSeqCstStore64SaveFP:
case Builtin::kTSANRelaxedLoad32IgnoreFP:
case Builtin::kTSANRelaxedLoad32SaveFP:
case Builtin::kTSANRelaxedLoad64IgnoreFP:
diff --git a/deps/v8/src/debug/debug-interface.cc b/deps/v8/src/debug/debug-interface.cc
index 5112c5ba73..50c63e8f8e 100644
--- a/deps/v8/src/debug/debug-interface.cc
+++ b/deps/v8/src/debug/debug-interface.cc
@@ -4,6 +4,7 @@
#include "src/debug/debug-interface.h"
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/script-details.h"
@@ -760,8 +761,8 @@ MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* v8_isolate,
{
i::AlignedCachedData* cached_data = nullptr;
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
- i::Compiler::GetSharedFunctionInfoForScript(
- isolate, str, i::ScriptDetails(), nullptr, cached_data,
+ i::Compiler::GetSharedFunctionInfoForScriptWithCachedData(
+ isolate, str, i::ScriptDetails(), cached_data,
ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheBecauseInspector,
i::FLAG_expose_inspector_scripts ? i::NOT_NATIVES_CODE
@@ -862,7 +863,7 @@ Local<Function> GetBuiltin(Isolate* v8_isolate, Builtin requested_builtin) {
.set_map(isolate->strict_function_without_prototype_map())
.Build();
- fun->shared().set_internal_formal_parameter_count(0);
+ fun->shared().set_internal_formal_parameter_count(i::JSParameterCount(0));
fun->shared().set_length(0);
return Utils::ToLocal(handle_scope.CloseAndEscape(fun));
}
@@ -1034,16 +1035,6 @@ int64_t GetNextRandomInt64(v8::Isolate* v8_isolate) {
->NextInt64();
}
-void EnumerateRuntimeCallCounters(v8::Isolate* v8_isolate,
- RuntimeCallCounterCallback callback) {
-#ifdef V8_RUNTIME_CALL_STATS
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- if (isolate->counters()) {
- isolate->counters()->runtime_call_stats()->EnumerateCounters(callback);
- }
-#endif // V8_RUNTIME_CALL_STATS
-}
-
int GetDebuggingId(v8::Local<v8::Function> function) {
i::Handle<i::JSReceiver> callable = v8::Utils::OpenHandle(*function);
if (!callable->IsJSFunction()) return i::DebugInfo::kNoDebuggingId;
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 81d38011cb..b186ab5689 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -7,9 +7,14 @@
#include <memory>
+#include "include/v8-callbacks.h"
+#include "include/v8-debug.h"
+#include "include/v8-embedder-heap.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-memory-span.h"
+#include "include/v8-promise.h"
+#include "include/v8-script.h"
#include "include/v8-util.h"
-#include "include/v8.h"
-#include "src/base/platform/time.h"
#include "src/base/vector.h"
#include "src/common/globals.h"
#include "src/debug/interface-types.h"
@@ -20,6 +25,8 @@ class V8Inspector;
namespace v8 {
+class Platform;
+
namespace internal {
struct CoverageBlock;
struct CoverageFunction;
@@ -515,11 +522,6 @@ enum class NativeAccessorType {
int64_t GetNextRandomInt64(v8::Isolate* isolate);
-using RuntimeCallCounterCallback =
- std::function<void(const char* name, int64_t count, base::TimeDelta time)>;
-void EnumerateRuntimeCallCounters(v8::Isolate* isolate,
- RuntimeCallCounterCallback callback);
-
MaybeLocal<Value> CallFunctionOn(Local<Context> context,
Local<Function> function, Local<Value> recv,
int argc, Local<Value> argv[],
diff --git a/deps/v8/src/debug/debug-property-iterator.h b/deps/v8/src/debug/debug-property-iterator.h
index 38c78b12bd..4e6a93f10e 100644
--- a/deps/v8/src/debug/debug-property-iterator.h
+++ b/deps/v8/src/debug/debug-property-iterator.h
@@ -5,14 +5,18 @@
#ifndef V8_DEBUG_DEBUG_PROPERTY_ITERATOR_H_
#define V8_DEBUG_DEBUG_PROPERTY_ITERATOR_H_
+#include "include/v8-local-handle.h"
+#include "include/v8-maybe.h"
+#include "include/v8-object.h"
#include "src/debug/debug-interface.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/objects/prototype.h"
-#include "include/v8.h"
-
namespace v8 {
+
+class Name;
+
namespace internal {
class JSReceiver;
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 41775c8965..4cf0124e8c 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -1325,7 +1325,7 @@ class DiscardBaselineCodeVisitor : public ThreadVisitor {
void Debug::DiscardBaselineCode(SharedFunctionInfo shared) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
- DCHECK(shared.HasBaselineData());
+ DCHECK(shared.HasBaselineCode());
Isolate* isolate = shared.GetIsolate();
DiscardBaselineCodeVisitor visitor(shared);
visitor.VisitThread(isolate, isolate->thread_local_top());
@@ -1333,7 +1333,7 @@ void Debug::DiscardBaselineCode(SharedFunctionInfo shared) {
// TODO(v8:11429): Avoid this heap walk somehow.
HeapObjectIterator iterator(isolate->heap());
auto trampoline = BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
- shared.flush_baseline_data();
+ shared.FlushBaselineCode();
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
if (obj.IsJSFunction()) {
@@ -1356,9 +1356,14 @@ void Debug::DiscardAllBaselineCode() {
obj = iterator.Next()) {
if (obj.IsJSFunction()) {
JSFunction fun = JSFunction::cast(obj);
- if (fun.shared().HasBaselineData()) {
+ if (fun.ActiveTierIsBaseline()) {
fun.set_code(*trampoline);
}
+ } else if (obj.IsSharedFunctionInfo()) {
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(obj);
+ if (shared.HasBaselineCode()) {
+ shared.FlushBaselineCode();
+ }
}
}
}
@@ -1369,7 +1374,7 @@ void Debug::DeoptimizeFunction(Handle<SharedFunctionInfo> shared) {
// inlining.
isolate_->AbortConcurrentOptimization(BlockingBehavior::kBlock);
- if (shared->HasBaselineData()) {
+ if (shared->HasBaselineCode()) {
DiscardBaselineCode(*shared);
}
@@ -1399,26 +1404,35 @@ void Debug::PrepareFunctionForDebugExecution(
DCHECK(shared->is_compiled());
DCHECK(shared->HasDebugInfo());
Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
- if (debug_info->flags(kRelaxedLoad) & DebugInfo::kPreparedForDebugExecution)
+ if (debug_info->flags(kRelaxedLoad) & DebugInfo::kPreparedForDebugExecution) {
return;
-
- if (shared->HasBytecodeArray()) {
- SharedFunctionInfo::InstallDebugBytecode(shared, isolate_);
}
+ // Have to discard baseline code before installing debug bytecode, since the
+ // bytecode array field on the baseline code object is immutable.
if (debug_info->CanBreakAtEntry()) {
// Deopt everything in case the function is inlined anywhere.
Deoptimizer::DeoptimizeAll(isolate_);
DiscardAllBaselineCode();
- InstallDebugBreakTrampoline();
} else {
DeoptimizeFunction(shared);
+ }
+
+ if (shared->HasBytecodeArray()) {
+ DCHECK(!shared->HasBaselineCode());
+ SharedFunctionInfo::InstallDebugBytecode(shared, isolate_);
+ }
+
+ if (debug_info->CanBreakAtEntry()) {
+ InstallDebugBreakTrampoline();
+ } else {
// Update PCs on the stack to point to recompiled code.
RedirectActiveFunctions redirect_visitor(
*shared, RedirectActiveFunctions::Mode::kUseDebugBytecode);
redirect_visitor.VisitThread(isolate_, isolate_->thread_local_top());
isolate_->thread_manager()->IterateArchivedThreads(&redirect_visitor);
}
+
debug_info->set_flags(
debug_info->flags(kRelaxedLoad) | DebugInfo::kPreparedForDebugExecution,
kRelaxedStore);
@@ -2183,8 +2197,7 @@ bool Debug::ShouldBeSkipped() {
DisableBreak no_recursive_break(this);
StackTraceFrameIterator iterator(isolate_);
- CommonFrame* frame = iterator.frame();
- FrameSummary summary = FrameSummary::GetTop(frame);
+ FrameSummary summary = iterator.GetTopValidFrame();
Handle<Object> script_obj = summary.script();
if (!script_obj->IsScript()) return false;
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index a2645d33d6..8c8d4bf2ad 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -9,11 +9,14 @@
#include <string>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-function-callback.h"
+#include "include/v8-local-handle.h"
#include "src/common/globals.h"
namespace v8 {
+class String;
+
namespace internal {
class BuiltinArguments;
} // namespace internal
diff --git a/deps/v8/src/deoptimizer/deoptimized-frame-info.cc b/deps/v8/src/deoptimizer/deoptimized-frame-info.cc
index a424a73ea1..c268d7258f 100644
--- a/deps/v8/src/deoptimizer/deoptimized-frame-info.cc
+++ b/deps/v8/src/deoptimizer/deoptimized-frame-info.cc
@@ -27,15 +27,17 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
TranslatedState::iterator frame_it,
Isolate* isolate) {
int parameter_count =
- frame_it->shared_info()->internal_formal_parameter_count();
+ frame_it->shared_info()
+ ->internal_formal_parameter_count_without_receiver();
TranslatedFrame::iterator stack_it = frame_it->begin();
// Get the function. Note that this might materialize the function.
// In case the debugger mutates this value, we should deoptimize
// the function and remember the value in the materialized value store.
- DCHECK_EQ(parameter_count, Handle<JSFunction>::cast(stack_it->GetValue())
- ->shared()
- .internal_formal_parameter_count());
+ DCHECK_EQ(parameter_count,
+ Handle<JSFunction>::cast(stack_it->GetValue())
+ ->shared()
+ .internal_formal_parameter_count_without_receiver());
stack_it++; // Skip the function.
stack_it++; // Skip the receiver.
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index ea460aa36f..6bf26d5bf3 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -477,15 +477,6 @@ const char* Deoptimizer::MessageFor(DeoptimizeKind kind, bool reuse_code) {
}
}
-namespace {
-
-uint16_t InternalFormalParameterCountWithReceiver(SharedFunctionInfo sfi) {
- static constexpr int kTheReceiver = 1;
- return sfi.internal_formal_parameter_count() + kTheReceiver;
-}
-
-} // namespace
-
Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
DeoptimizeKind kind, unsigned deopt_exit_index,
Address from, int fp_to_sp_delta)
@@ -541,7 +532,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
}
unsigned size = ComputeInputFrameSize();
const int parameter_count =
- InternalFormalParameterCountWithReceiver(function.shared());
+ function.shared().internal_formal_parameter_count_with_receiver();
input_ = new (size) FrameDescription(size, parameter_count);
if (kSupportsFixedDeoptExitSizes) {
@@ -903,9 +894,10 @@ void Deoptimizer::DoComputeOutputFrames() {
isolate_, input_->GetFramePointerAddress(), stack_fp_, &state_iterator,
input_data.LiteralArray(), input_->GetRegisterValues(), trace_file,
function_.IsHeapObject()
- ? function_.shared().internal_formal_parameter_count()
+ ? function_.shared()
+ .internal_formal_parameter_count_without_receiver()
: 0,
- actual_argument_count_);
+ actual_argument_count_ - kJSArgcReceiverSlots);
// Do the input frame to output frame(s) translation.
size_t count = translated_state_.frames().size();
@@ -1026,7 +1018,8 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
const int bytecode_offset =
goto_catch_handler ? catch_handler_pc_offset_ : real_bytecode_offset;
- const int parameters_count = InternalFormalParameterCountWithReceiver(shared);
+ const int parameters_count =
+ shared.internal_formal_parameter_count_with_receiver();
// If this is the bottom most frame or the previous frame was the arguments
// adaptor fake frame, then we already have extra arguments in the stack
@@ -1068,7 +1061,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
const bool advance_bc =
(!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
!goto_catch_handler;
- const bool is_baseline = shared.HasBaselineData();
+ const bool is_baseline = shared.HasBaselineCode();
Code dispatch_builtin =
builtins->code(DispatchBuiltinFor(is_baseline, advance_bc));
@@ -1100,11 +1093,13 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
}
// Note: parameters_count includes the receiver.
+ // TODO(v8:11112): Simplify once the receiver is always included in argc.
if (verbose_tracing_enabled() && is_bottommost &&
- actual_argument_count_ > parameters_count - 1) {
- PrintF(trace_scope_->file(),
- " -- %d extra argument(s) already in the stack --\n",
- actual_argument_count_ - parameters_count + 1);
+ actual_argument_count_ - kJSArgcReceiverSlots > parameters_count - 1) {
+ PrintF(
+ trace_scope_->file(),
+ " -- %d extra argument(s) already in the stack --\n",
+ actual_argument_count_ - kJSArgcReceiverSlots - parameters_count + 1);
}
frame_writer.PushStackJSArguments(value_iterator, parameters_count);
@@ -1185,7 +1180,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
(translated_state_.frames()[frame_index - 1]).kind();
argc = previous_frame_kind == TranslatedFrame::kArgumentsAdaptor
? output_[frame_index - 1]->parameter_count()
- : parameters_count - 1;
+ : parameters_count - (kJSArgcIncludesReceiver ? 0 : 1);
}
frame_writer.PushRawValue(argc, "actual argument count\n");
@@ -1334,7 +1329,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
TranslatedFrame::iterator value_iterator = translated_frame->begin();
const int argument_count_without_receiver = translated_frame->height() - 1;
const int formal_parameter_count =
- translated_frame->raw_shared_info().internal_formal_parameter_count();
+ translated_frame->raw_shared_info()
+ .internal_formal_parameter_count_without_receiver();
const int extra_argument_count =
argument_count_without_receiver - formal_parameter_count;
// The number of pushed arguments is the maximum of the actual argument count
@@ -1350,8 +1346,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
}
// Allocate and store the output frame description.
- FrameDescription* output_frame = new (output_frame_size)
- FrameDescription(output_frame_size, argument_count_without_receiver);
+ FrameDescription* output_frame = new (output_frame_size) FrameDescription(
+ output_frame_size, JSParameterCount(argument_count_without_receiver));
// The top address of the frame is computed from the previous frame's top and
// this frame's size.
const intptr_t top_address =
@@ -1470,9 +1466,8 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
frame_writer.PushTranslatedValue(value_iterator++, "context");
// Number of incoming arguments.
- const uint32_t parameters_count_without_receiver = parameters_count - 1;
- frame_writer.PushRawObject(Smi::FromInt(parameters_count_without_receiver),
- "argc\n");
+ const uint32_t argc = parameters_count - (kJSArgcIncludesReceiver ? 0 : 1);
+ frame_writer.PushRawObject(Smi::FromInt(argc), "argc\n");
// The constructor function was mentioned explicitly in the
// CONSTRUCT_STUB_FRAME.
@@ -2067,7 +2062,7 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
// static
unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) {
- int parameter_slots = InternalFormalParameterCountWithReceiver(shared);
+ int parameter_slots = shared.internal_formal_parameter_count_with_receiver();
return parameter_slots * kSystemPointerSize;
}
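
This file, like translated-state.cc below, drops the local InternalFormalParameterCountWithReceiver helper in favour of the SharedFunctionInfo accessors and routes raw argument counts through JSParameterCount / kJSArgcReceiverSlots. A rough sketch of the convention those names encode, assuming the usual definitions in src/common/globals.h (which are not part of this diff):

    // Sketch only; hypothetical stand-ins for the real constants and helpers.
    constexpr bool kJSArgcIncludesReceiver = false;  // build-time choice
    constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

    // internal_formal_parameter_count_with_receiver() is always the
    // without-receiver count plus one slot for the receiver.
    constexpr int WithReceiver(int without_receiver) { return without_receiver + 1; }

    // JSParameterCount(): convert an explicit-argument count into the argc
    // representation expected by FrameDescription and the stack layout.
    constexpr int JSParameterCount(int without_receiver) {
      return without_receiver + kJSArgcReceiverSlots;
    }
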
diff --git a/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc b/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc
new file mode 100644
index 0000000000..fb82466af1
--- /dev/null
+++ b/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc
@@ -0,0 +1,42 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/deoptimizer/deoptimizer.h"
+
+namespace v8 {
+namespace internal {
+
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
+const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
+const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 3 * kInstrSize;
+const int Deoptimizer::kEagerWithResumeDeoptExitSize =
+ kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
+// TODO(LOONG_dev): LOONG64 Is the PcOffset right?
+const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
+const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
+ kInstrSize + kSystemPointerSize;
+
+Float32 RegisterValues::GetFloatRegister(unsigned n) const {
+ return Float32::FromBits(
+ static_cast<uint32_t>(double_registers_[n].get_bits()));
+}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No embedded constant pool support.
+ UNREACHABLE();
+}
+
+void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/deoptimizer/translated-state.cc b/deps/v8/src/deoptimizer/translated-state.cc
index 4f5e3370e6..721918c195 100644
--- a/deps/v8/src/deoptimizer/translated-state.cc
+++ b/deps/v8/src/deoptimizer/translated-state.cc
@@ -678,15 +678,6 @@ TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame(
return frame;
}
-namespace {
-
-uint16_t InternalFormalParameterCountWithReceiver(SharedFunctionInfo sfi) {
- static constexpr int kTheReceiver = 1;
- return sfi.internal_formal_parameter_count() + kTheReceiver;
-}
-
-} // namespace
-
int TranslatedFrame::GetValueCount() {
// The function is added to all frame state descriptors in
// InstructionSelector::AddInputsToFrameStateDescriptor.
@@ -695,7 +686,7 @@ int TranslatedFrame::GetValueCount() {
switch (kind()) {
case kUnoptimizedFunction: {
int parameter_count =
- InternalFormalParameterCountWithReceiver(raw_shared_info_);
+ raw_shared_info_.internal_formal_parameter_count_with_receiver();
static constexpr int kTheContext = 1;
static constexpr int kTheAccumulator = 1;
return height() + parameter_count + kTheContext + kTheFunction +
@@ -748,7 +739,8 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
if (trace_file != nullptr) {
std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
PrintF(trace_file, " reading input frame %s", name.get());
- int arg_count = InternalFormalParameterCountWithReceiver(shared_info);
+ int arg_count =
+ shared_info.internal_formal_parameter_count_with_receiver();
PrintF(trace_file,
" => bytecode_offset=%d, args=%d, height=%d, retval=%i(#%i); "
"inputs:\n",
@@ -1298,7 +1290,9 @@ TranslatedState::TranslatedState(const JavaScriptFrame* frame)
int actual_argc = frame->GetActualArgumentCount();
Init(frame->isolate(), frame->fp(), frame->fp(), &it, data.LiteralArray(),
nullptr /* registers */, nullptr /* trace file */,
- frame->function().shared().internal_formal_parameter_count(),
+ frame->function()
+ .shared()
+ .internal_formal_parameter_count_without_receiver(),
actual_argc);
}
@@ -1977,21 +1971,21 @@ TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
// be shown in a stack trace.
if (frames_[i].kind() ==
TranslatedFrame::kJavaScriptBuiltinContinuation &&
- frames_[i].shared_info()->internal_formal_parameter_count() ==
- kDontAdaptArgumentsSentinel) {
+ frames_[i].shared_info()->IsDontAdaptArguments()) {
DCHECK(frames_[i].shared_info()->IsApiFunction());
// The argument count for this special case is always the second
// to last value in the TranslatedFrame. It should also always be
- // {1}, as the GenericLazyDeoptContinuation builtin only has one
- // argument (the receiver).
+ // {1}, as the GenericLazyDeoptContinuation builtin has one explicit
+ // argument (the result).
static constexpr int kTheContext = 1;
const int height = frames_[i].height() + kTheContext;
*args_count = frames_[i].ValueAt(height - 1)->GetSmiValue();
- DCHECK_EQ(*args_count, 1);
+ DCHECK_EQ(*args_count, JSParameterCount(1));
} else {
- *args_count = InternalFormalParameterCountWithReceiver(
- *frames_[i].shared_info());
+ *args_count = frames_[i]
+ .shared_info()
+ ->internal_formal_parameter_count_with_receiver();
}
return &(frames_[i]);
}
diff --git a/deps/v8/src/diagnostics/arm/disasm-arm.cc b/deps/v8/src/diagnostics/arm/disasm-arm.cc
index cf37d12a1f..7ba20c0d98 100644
--- a/deps/v8/src/diagnostics/arm/disasm-arm.cc
+++ b/deps/v8/src/diagnostics/arm/disasm-arm.cc
@@ -676,7 +676,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
default: {
UNREACHABLE();
- return -1;
}
}
out_buffer_pos_ +=
@@ -787,7 +786,6 @@ void Decoder::DecodeType01(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
// strex
@@ -808,7 +806,6 @@ void Decoder::DecodeType01(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else {
@@ -853,7 +850,6 @@ void Decoder::DecodeType01(Instruction* instr) {
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
- break;
}
}
} else {
@@ -894,7 +890,6 @@ void Decoder::DecodeType01(Instruction* instr) {
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
- break;
}
}
return;
@@ -1030,7 +1025,6 @@ void Decoder::DecodeType01(Instruction* instr) {
default: {
// The Opcode field is a 4-bit field.
UNREACHABLE();
- break;
}
}
}
@@ -1107,10 +1101,8 @@ void Decoder::DecodeType3(Instruction* instr) {
break;
case 1:
UNREACHABLE();
- break;
case 2:
UNREACHABLE();
- break;
case 3:
Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
break;
@@ -1119,7 +1111,6 @@ void Decoder::DecodeType3(Instruction* instr) {
switch (instr->Bits(22, 21)) {
case 0:
UNREACHABLE();
- break;
case 1:
if (instr->Bits(9, 6) == 1) {
if (instr->Bit(20) == 0) {
@@ -1948,7 +1939,6 @@ void Decoder::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
} else if (instr->Opc1Value() == 0x4 && op2) {
// Floating-point minNum/maxNum.
@@ -2002,7 +1992,6 @@ void Decoder::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
} else {
Unknown(instr);
@@ -2617,12 +2606,10 @@ const char* NameConverter::NameOfCPURegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // ARM does not have the concept of a byte register
- return "nobytereg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // ARM does not have any XMM registers
- return "noxmmreg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/deps/v8/src/diagnostics/arm/eh-frame-arm.cc b/deps/v8/src/diagnostics/arm/eh-frame-arm.cc
index 7d0dc49155..ef0a421820 100644
--- a/deps/v8/src/diagnostics/arm/eh-frame-arm.cc
+++ b/deps/v8/src/diagnostics/arm/eh-frame-arm.cc
@@ -37,7 +37,6 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
return kR0DwarfCode;
default:
UNIMPLEMENTED();
- return -1;
}
}
@@ -54,7 +53,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "lr";
default:
UNIMPLEMENTED();
- return nullptr;
}
}
diff --git a/deps/v8/src/diagnostics/arm/unwinder-arm.cc b/deps/v8/src/diagnostics/arm/unwinder-arm.cc
index e0e2f0e91f..e51804caea 100644
--- a/deps/v8/src/diagnostics/arm/unwinder-arm.cc
+++ b/deps/v8/src/diagnostics/arm/unwinder-arm.cc
@@ -5,7 +5,7 @@
#include <memory>
#include "include/v8-unwinder-state.h"
-#include "include/v8.h"
+#include "include/v8-unwinder.h"
#include "src/diagnostics/unwinder.h"
#include "src/execution/frame-constants.h"
diff --git a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
index 93b9531bd5..af6e7f5441 100644
--- a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
+++ b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
@@ -3954,7 +3954,6 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
}
default: {
UNIMPLEMENTED();
- return 0;
}
}
}
@@ -3997,7 +3996,6 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
return 0;
}
UNIMPLEMENTED();
- return 0;
}
case 'L': { // IVLSLane[0123] - suffix indicates access size shift.
AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0'));
@@ -4042,12 +4040,10 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
return static_cast<int>(strlen("IVMIShiftAmt2"));
} else {
UNIMPLEMENTED();
- return 0;
}
}
default: {
UNIMPLEMENTED();
- return 0;
}
}
}
@@ -4342,12 +4338,10 @@ const char* NameConverter::NameOfCPURegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // ARM64 does not have the concept of a byte register
- return "nobytereg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // ARM64 does not have any XMM registers
- return "noxmmreg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/deps/v8/src/diagnostics/arm64/eh-frame-arm64.cc b/deps/v8/src/diagnostics/arm64/eh-frame-arm64.cc
index 115d0cc300..d27827cfc1 100644
--- a/deps/v8/src/diagnostics/arm64/eh-frame-arm64.cc
+++ b/deps/v8/src/diagnostics/arm64/eh-frame-arm64.cc
@@ -38,7 +38,6 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
return kX0DwarfCode;
default:
UNIMPLEMENTED();
- return -1;
}
}
@@ -55,7 +54,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "sp"; // This could be zr as well
default:
UNIMPLEMENTED();
- return nullptr;
}
}
diff --git a/deps/v8/src/diagnostics/compilation-statistics.cc b/deps/v8/src/diagnostics/compilation-statistics.cc
index 40bb239b12..74fa232a08 100644
--- a/deps/v8/src/diagnostics/compilation-statistics.cc
+++ b/deps/v8/src/diagnostics/compilation-statistics.cc
@@ -56,6 +56,29 @@ void CompilationStatistics::BasicStats::Accumulate(const BasicStats& stats) {
}
}
+std::string CompilationStatistics::BasicStats::AsJSON() {
+// clang-format off
+#define DICT(s) "{" << s << "}"
+#define QUOTE(s) "\"" << s << "\""
+#define MEMBER(s) QUOTE(s) << ":"
+
+ DCHECK_EQ(function_name_.find("\""), std::string::npos);
+
+ std::stringstream stream;
+ stream << DICT(
+ MEMBER("function_name") << QUOTE(function_name_) << ","
+ MEMBER("total_allocated_bytes") << total_allocated_bytes_ << ","
+ MEMBER("max_allocated_bytes") << max_allocated_bytes_ << ","
+ MEMBER("absolute_max_allocated_bytes") << absolute_max_allocated_bytes_);
+
+ return stream.str();
+
+#undef DICT
+#undef QUOTE
+#undef MEMBER
+ // clang-format on
+}
+
static void WriteLine(std::ostream& os, bool machine_format, const char* name,
const CompilationStatistics::BasicStats& stats,
const CompilationStatistics::BasicStats& total_stats) {
diff --git a/deps/v8/src/diagnostics/compilation-statistics.h b/deps/v8/src/diagnostics/compilation-statistics.h
index d14e108d07..a6abdf5e89 100644
--- a/deps/v8/src/diagnostics/compilation-statistics.h
+++ b/deps/v8/src/diagnostics/compilation-statistics.h
@@ -37,6 +37,8 @@ class CompilationStatistics final : public Malloced {
void Accumulate(const BasicStats& stats);
+ std::string AsJSON();
+
base::TimeDelta delta_;
size_t total_allocated_bytes_;
size_t max_allocated_bytes_;
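
The new AsJSON() emits each BasicStats record as one flat JSON object containing the function name and the three allocation counters; delta_ is not serialized. A hypothetical usage with illustrative values (not taken from any real run):

    // Hypothetical usage; the numbers below are placeholders.
    CompilationStatistics::BasicStats stats;  // populated during a compile phase
    std::string json = stats.AsJSON();
    // => {"function_name":"foo","total_allocated_bytes":262144,
    //     "max_allocated_bytes":65536,"absolute_max_allocated_bytes":131072}
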
diff --git a/deps/v8/src/diagnostics/eh-frame.cc b/deps/v8/src/diagnostics/eh-frame.cc
index d53ea7698a..223e288e6e 100644
--- a/deps/v8/src/diagnostics/eh-frame.cc
+++ b/deps/v8/src/diagnostics/eh-frame.cc
@@ -27,14 +27,12 @@ void EhFrameWriter::WriteInitialStateInCie() { UNIMPLEMENTED(); }
int EhFrameWriter::RegisterToDwarfCode(Register) {
UNIMPLEMENTED();
- return -1;
}
#ifdef ENABLE_DISASSEMBLER
const char* EhFrameDisassembler::DwarfRegisterCodeToString(int) {
UNIMPLEMENTED();
- return nullptr;
}
#endif
diff --git a/deps/v8/src/diagnostics/gdb-jit.cc b/deps/v8/src/diagnostics/gdb-jit.cc
index 53c29cfb24..bc03a189cd 100644
--- a/deps/v8/src/diagnostics/gdb-jit.cc
+++ b/deps/v8/src/diagnostics/gdb-jit.cc
@@ -4,14 +4,17 @@
#include "src/diagnostics/gdb-jit.h"
+#include <iterator>
#include <map>
#include <memory>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-callbacks.h"
#include "src/api/api-inl.h"
+#include "src/base/address-region.h"
#include "src/base/bits.h"
#include "src/base/hashmap.h"
+#include "src/base/memory.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/base/strings.h"
@@ -63,7 +66,9 @@ class Writer {
T* operator->() { return w_->RawSlotAt<T>(offset_); }
- void set(const T& value) { *w_->RawSlotAt<T>(offset_) = value; }
+ void set(const T& value) {
+ base::WriteUnalignedValue(w_->AddressAt<T>(offset_), value);
+ }
Slot<T> at(int i) { return Slot<T>(w_, offset_ + sizeof(T) * i); }
@@ -75,7 +80,7 @@ class Writer {
template <typename T>
void Write(const T& val) {
Ensure(position_ + sizeof(T));
- *RawSlotAt<T>(position_) = val;
+ base::WriteUnalignedValue(AddressAt<T>(position_), val);
position_ += sizeof(T);
}
@@ -154,6 +159,12 @@ class Writer {
friend class Slot;
template <typename T>
+ Address AddressAt(uintptr_t offset) {
+ DCHECK(offset < capacity_ && offset + sizeof(T) <= capacity_);
+ return reinterpret_cast<Address>(&buffer_[offset]);
+ }
+
+ template <typename T>
T* RawSlotAt(uintptr_t offset) {
DCHECK(offset < capacity_ && offset + sizeof(T) <= capacity_);
return reinterpret_cast<T*>(&buffer_[offset]);
@@ -896,17 +907,20 @@ class CodeDescription {
};
#endif
- CodeDescription(const char* name, Code code, SharedFunctionInfo shared,
- LineInfo* lineinfo)
- : name_(name), code_(code), shared_info_(shared), lineinfo_(lineinfo) {}
+ CodeDescription(const char* name, base::AddressRegion region,
+ SharedFunctionInfo shared, LineInfo* lineinfo,
+ bool is_function)
+ : name_(name),
+ shared_info_(shared),
+ lineinfo_(lineinfo),
+ is_function_(is_function),
+ code_region_(region) {}
const char* name() const { return name_; }
LineInfo* lineinfo() const { return lineinfo_; }
- bool is_function() const {
- return CodeKindIsOptimizedJSFunction(code_.kind());
- }
+ bool is_function() const { return is_function_; }
bool has_scope_info() const { return !shared_info_.is_null(); }
@@ -915,15 +929,11 @@ class CodeDescription {
return shared_info_.scope_info();
}
- uintptr_t CodeStart() const {
- return static_cast<uintptr_t>(code_.InstructionStart());
- }
+ uintptr_t CodeStart() const { return code_region_.begin(); }
- uintptr_t CodeEnd() const {
- return static_cast<uintptr_t>(code_.InstructionEnd());
- }
+ uintptr_t CodeEnd() const { return code_region_.end(); }
- uintptr_t CodeSize() const { return CodeEnd() - CodeStart(); }
+ uintptr_t CodeSize() const { return code_region_.size(); }
bool has_script() {
return !shared_info_.is_null() && shared_info_.script().IsScript();
@@ -933,6 +943,8 @@ class CodeDescription {
bool IsLineInfoAvailable() { return lineinfo_ != nullptr; }
+ base::AddressRegion region() { return code_region_; }
+
#if V8_TARGET_ARCH_X64
uintptr_t GetStackStateStartAddress(StackState state) const {
DCHECK(state < STACK_STATE_MAX);
@@ -946,7 +958,7 @@ class CodeDescription {
#endif
std::unique_ptr<char[]> GetFilename() {
- if (!shared_info_.is_null()) {
+ if (!shared_info_.is_null() && script().name().IsString()) {
return String::cast(script().name()).ToCString();
} else {
std::unique_ptr<char[]> result(new char[1]);
@@ -965,9 +977,10 @@ class CodeDescription {
private:
const char* name_;
- Code code_;
SharedFunctionInfo shared_info_;
LineInfo* lineinfo_;
+ bool is_function_;
+ base::AddressRegion code_region_;
#if V8_TARGET_ARCH_X64
uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
#endif
@@ -1080,6 +1093,8 @@ class DebugInfoSection : public DebugSection {
UNIMPLEMENTED();
#elif V8_TARGET_ARCH_MIPS64
UNIMPLEMENTED();
+#elif V8_TARGET_ARCH_LOONG64
+ UNIMPLEMENTED();
#elif V8_TARGET_ARCH_PPC64 && V8_OS_LINUX
w->Write<uint8_t>(DW_OP_reg31); // The frame pointer is here on PPC64.
#elif V8_TARGET_ARCH_S390
@@ -1092,7 +1107,7 @@ class DebugInfoSection : public DebugSection {
int params = scope.ParameterCount();
int context_slots = scope.ContextLocalCount();
// The real slot ID is internal_slots + context_slot_id.
- int internal_slots = Context::MIN_CONTEXT_SLOTS;
+ int internal_slots = scope.ContextHeaderLength();
int current_abbreviation = 4;
for (int param = 0; param < params; ++param) {
@@ -1109,7 +1124,7 @@ class DebugInfoSection : public DebugSection {
}
// See contexts.h for more information.
- DCHECK_EQ(Context::MIN_CONTEXT_SLOTS, 3);
+ DCHECK(internal_slots == 2 || internal_slots == 3);
DCHECK_EQ(Context::SCOPE_INFO_INDEX, 0);
DCHECK_EQ(Context::PREVIOUS_INDEX, 1);
DCHECK_EQ(Context::EXTENSION_INDEX, 2);
@@ -1117,8 +1132,10 @@ class DebugInfoSection : public DebugSection {
w->WriteString(".scope_info");
w->WriteULEB128(current_abbreviation++);
w->WriteString(".previous");
- w->WriteULEB128(current_abbreviation++);
- w->WriteString(".extension");
+ if (internal_slots == 3) {
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(".extension");
+ }
for (int context_slot = 0; context_slot < context_slots; ++context_slot) {
w->WriteULEB128(current_abbreviation++);
@@ -1814,26 +1831,17 @@ static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
return CreateCodeEntry(reinterpret_cast<Address>(w.buffer()), w.position());
}
-struct AddressRange {
- Address start;
- Address end;
-};
-
-struct AddressRangeLess {
- bool operator()(const AddressRange& a, const AddressRange& b) const {
- if (a.start == b.start) return a.end < b.end;
- return a.start < b.start;
+// Like base::AddressRegion::StartAddressLess but also compares |end| when
+// |begin| is equal.
+struct AddressRegionLess {
+ bool operator()(const base::AddressRegion& a,
+ const base::AddressRegion& b) const {
+ if (a.begin() == b.begin()) return a.end() < b.end();
+ return a.begin() < b.begin();
}
};
-struct CodeMapConfig {
- using Key = AddressRange;
- using Value = JITCodeEntry*;
- using Less = AddressRangeLess;
-};
-
-using CodeMap =
- std::map<CodeMapConfig::Key, CodeMapConfig::Value, CodeMapConfig::Less>;
+using CodeMap = std::map<base::AddressRegion, JITCodeEntry*, AddressRegionLess>;
static CodeMap* GetCodeMap() {
// TODO(jgruber): Don't leak.
@@ -1907,50 +1915,72 @@ static void AddUnwindInfo(CodeDescription* desc) {
static base::LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
-// Remove entries from the map that intersect the given address range,
-// and deregister them from GDB.
-static void RemoveJITCodeEntries(CodeMap* map, const AddressRange& range) {
- DCHECK(range.start < range.end);
+static base::Optional<std::pair<CodeMap::iterator, CodeMap::iterator>>
+GetOverlappingRegions(CodeMap* map, const base::AddressRegion region) {
+ DCHECK_LT(region.begin(), region.end());
- if (map->empty()) return;
+ if (map->empty()) return {};
// Find the first overlapping entry.
- // If successful, points to the first element not less than `range`. The
+ // If successful, points to the first element not less than `region`. The
// returned iterator has the key in `first` and the value in `second`.
- auto it = map->lower_bound(range);
+ auto it = map->lower_bound(region);
auto start_it = it;
if (it == map->end()) {
start_it = map->begin();
+ // Find the first overlapping entry.
+ for (; start_it != map->end(); ++start_it) {
+ if (start_it->first.end() > region.begin()) {
+ break;
+ }
+ }
} else if (it != map->begin()) {
for (--it; it != map->begin(); --it) {
- if ((*it).first.end <= range.start) break;
+ if ((*it).first.end() <= region.begin()) break;
+ start_it = it;
+ }
+ if (it == map->begin() && it->first.end() > region.begin()) {
start_it = it;
}
}
- DCHECK(start_it != map->end());
+ if (start_it == map->end()) {
+ return {};
+ }
- // Find the first non-overlapping entry after `range`.
+ // Find the first non-overlapping entry after `region`.
- const auto end_it = map->lower_bound({range.end, 0});
+ const auto end_it = map->lower_bound({region.end(), 0});
- // Evict intersecting ranges.
+ // Return a range containing intersecting regions.
- if (std::distance(start_it, end_it) < 1) return; // No overlapping entries.
+ if (std::distance(start_it, end_it) < 1)
+ return {}; // No overlapping entries.
- for (auto it = start_it; it != end_it; it++) {
- JITCodeEntry* old_entry = (*it).second;
- UnregisterCodeEntry(old_entry);
- DestroyCodeEntry(old_entry);
- }
+ return {{start_it, end_it}};
+}
+
+// Remove entries from the map that intersect the given address region,
+// and deregister them from GDB.
+static void RemoveJITCodeEntries(CodeMap* map,
+ const base::AddressRegion region) {
+ if (auto overlap = GetOverlappingRegions(map, region)) {
+ auto start_it = overlap->first;
+ auto end_it = overlap->second;
+ for (auto it = start_it; it != end_it; it++) {
+ JITCodeEntry* old_entry = (*it).second;
+ UnregisterCodeEntry(old_entry);
+ DestroyCodeEntry(old_entry);
+ }
- map->erase(start_it, end_it);
+ map->erase(start_it, end_it);
+ }
}
// Insert the entry into the map and register it with GDB.
-static void AddJITCodeEntry(CodeMap* map, const AddressRange& range,
+static void AddJITCodeEntry(CodeMap* map, const base::AddressRegion region,
JITCodeEntry* entry, bool dump_if_enabled,
const char* name_hint) {
#if defined(DEBUG) && !V8_OS_WIN
@@ -1967,24 +1997,21 @@ static void AddJITCodeEntry(CodeMap* map, const AddressRange& range,
}
#endif
- auto result = map->emplace(range, entry);
+ auto result = map->emplace(region, entry);
DCHECK(result.second); // Insertion happened.
USE(result);
RegisterCodeEntry(entry);
}
-static void AddCode(const char* name, Code code, SharedFunctionInfo shared,
- LineInfo* lineinfo) {
+static void AddCode(const char* name, base::AddressRegion region,
+ SharedFunctionInfo shared, LineInfo* lineinfo,
+ Isolate* isolate, bool is_function) {
DisallowGarbageCollection no_gc;
+ CodeDescription code_desc(name, region, shared, lineinfo, is_function);
CodeMap* code_map = GetCodeMap();
- AddressRange range;
- range.start = code.address();
- range.end = code.address() + code.CodeSize();
- RemoveJITCodeEntries(code_map, range);
-
- CodeDescription code_desc(name, code, shared, lineinfo);
+ RemoveJITCodeEntries(code_map, region);
if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) {
delete lineinfo;
@@ -1992,7 +2019,6 @@ static void AddCode(const char* name, Code code, SharedFunctionInfo shared,
}
AddUnwindInfo(&code_desc);
- Isolate* isolate = code.GetIsolate();
JITCodeEntry* entry = CreateELFObject(&code_desc, isolate);
delete lineinfo;
@@ -2008,25 +2034,40 @@ static void AddCode(const char* name, Code code, SharedFunctionInfo shared,
should_dump = (name_hint != nullptr);
}
}
- AddJITCodeEntry(code_map, range, entry, should_dump, name_hint);
+ AddJITCodeEntry(code_map, region, entry, should_dump, name_hint);
}
void EventHandler(const v8::JitCodeEvent* event) {
if (!FLAG_gdbjit) return;
- if (event->code_type != v8::JitCodeEvent::JIT_CODE) return;
+ if ((event->code_type != v8::JitCodeEvent::JIT_CODE) &&
+ (event->code_type != v8::JitCodeEvent::WASM_CODE)) {
+ return;
+ }
base::MutexGuard lock_guard(mutex.Pointer());
switch (event->type) {
case v8::JitCodeEvent::CODE_ADDED: {
Address addr = reinterpret_cast<Address>(event->code_start);
- Isolate* isolate = reinterpret_cast<Isolate*>(event->isolate);
- Code code = isolate->heap()->GcSafeFindCodeForInnerPointer(addr);
LineInfo* lineinfo = GetLineInfo(addr);
std::string event_name(event->name.str, event->name.len);
// It's called UnboundScript in the API but it's a SharedFunctionInfo.
SharedFunctionInfo shared = event->script.IsEmpty()
? SharedFunctionInfo()
: *Utils::OpenHandle(*event->script);
- AddCode(event_name.c_str(), code, shared, lineinfo);
+ Isolate* isolate = reinterpret_cast<Isolate*>(event->isolate);
+ bool is_function = false;
+      // TODO(zhin): See if we can use event->code_type to determine
+      // is_function. The difference currently is that JIT_CODE covers
+      // SparkPlug, TurboProp, and TurboFan, whereas
+      // CodeKindIsOptimizedJSFunction covers only TurboProp and TurboFan.
+      // is_function is used for AddUnwindInfo, and the prologue that SP
+      // generates probably matches that of TP/TF, so we could use
+      // event->code_type here instead of finding the Code.
+      // TODO(zhin): Rename is_function to be more accurate.
+ if (event->code_type == v8::JitCodeEvent::JIT_CODE) {
+ Code code = isolate->heap()->GcSafeFindCodeForInnerPointer(addr);
+ is_function = CodeKindIsOptimizedJSFunction(code.kind());
+ }
+ AddCode(event_name.c_str(), {addr, event->code_len}, shared, lineinfo,
+ isolate, is_function);
break;
}
case v8::JitCodeEvent::CODE_MOVED:
@@ -2056,6 +2097,23 @@ void EventHandler(const v8::JitCodeEvent* event) {
}
}
}
+
+void AddRegionForTesting(const base::AddressRegion region) {
+ // For testing purposes we don't care about JITCodeEntry, pass nullptr.
+ auto result = GetCodeMap()->emplace(region, nullptr);
+ DCHECK(result.second); // Insertion happened.
+ USE(result);
+}
+
+void ClearCodeMapForTesting() { GetCodeMap()->clear(); }
+
+size_t NumOverlapEntriesForTesting(const base::AddressRegion region) {
+ if (auto overlaps = GetOverlappingRegions(GetCodeMap(), region)) {
+ return std::distance(overlaps->first, overlaps->second);
+ }
+ return 0;
+}
+
#endif
} // namespace GDBJITInterface
} // namespace internal
diff --git a/deps/v8/src/diagnostics/gdb-jit.h b/deps/v8/src/diagnostics/gdb-jit.h
index 82f5ce892c..eb4d515a81 100644
--- a/deps/v8/src/diagnostics/gdb-jit.h
+++ b/deps/v8/src/diagnostics/gdb-jit.h
@@ -5,6 +5,8 @@
#ifndef V8_DIAGNOSTICS_GDB_JIT_H_
#define V8_DIAGNOSTICS_GDB_JIT_H_
+#include "src/base/address-region.h"
+
//
// GDB has two ways of interacting with JIT code. With the "JIT compilation
// interface", V8 can tell GDB when it emits JIT code. Unfortunately to do so,
@@ -29,9 +31,19 @@ struct JitCodeEvent;
namespace internal {
namespace GDBJITInterface {
#ifdef ENABLE_GDB_JIT_INTERFACE
+
// JitCodeEventHandler that creates ELF/Mach-O objects and registers them with
// GDB.
void EventHandler(const v8::JitCodeEvent* event);
+
+// Expose some functions for unittests. These only exercise the logic for
+// adding an AddressRegion to the CodeMap and checking for overlaps; they do
+// not touch the actual JITCodeEntry at all.
+V8_EXPORT_PRIVATE void AddRegionForTesting(const base::AddressRegion region);
+V8_EXPORT_PRIVATE void ClearCodeMapForTesting();
+V8_EXPORT_PRIVATE size_t
+NumOverlapEntriesForTesting(const base::AddressRegion region);
+
#endif
} // namespace GDBJITInterface
} // namespace internal
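
The testing hooks exist so unittests can exercise the CodeMap overlap logic without creating real JITCodeEntry objects; they are only compiled under ENABLE_GDB_JIT_INTERFACE. A hypothetical unittest-style sketch, not taken from this diff:

    #include "src/base/address-region.h"
    #include "src/diagnostics/gdb-jit.h"

    using v8::base::AddressRegion;
    namespace gdb = v8::internal::GDBJITInterface;

    void OverlapSmokeTest() {
      gdb::ClearCodeMapForTesting();
      gdb::AddRegionForTesting(AddressRegion(0x1000, 0x100));  // [0x1000, 0x1100)
      gdb::AddRegionForTesting(AddressRegion(0x2000, 0x100));  // [0x2000, 0x2100)
      // A query straddling the end of the first region overlaps exactly one entry.
      size_t n = gdb::NumOverlapEntriesForTesting(AddressRegion(0x10f0, 0x20));
      (void)n;  // n == 1
    }
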
diff --git a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
index de124de747..8f721c997d 100644
--- a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -89,6 +89,10 @@ static const char* const conditional_move_mnem[] = {
/*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
/*12*/ "cmovl", "cmovnl", "cmovng", "cmovg"};
+static const char* const cmp_pseudo_op[16] = {
+ "eq", "lt", "le", "unord", "neq", "nlt", "nle", "ord",
+ "eq_uq", "nge", "ngt", "false", "neq_oq", "ge", "gt", "true"};
+
enum InstructionType {
NO_INSTR,
ZERO_OPERANDS_INSTR,
@@ -415,13 +419,11 @@ int DisassemblerIA32::PrintRightOperandHelper(
UnimplementedInstruction();
return 1;
}
- } else {
- AppendToBuffer("[%s]", (this->*register_name)(rm));
- return 1;
}
- break;
+ AppendToBuffer("[%s]", (this->*register_name)(rm));
+ return 1;
case 1: // fall through
- case 2:
+ case 2: {
if (rm == esp) {
byte sib = *(modrmp + 1);
int scale, index, base;
@@ -436,14 +438,13 @@ int DisassemblerIA32::PrintRightOperandHelper(
disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
}
return mod == 2 ? 6 : 3;
- } else {
- // No sib.
- int disp = mod == 2 ? Imm32(modrmp + 1) : Imm8(modrmp + 1);
- AppendToBuffer("[%s%s0x%x]", (this->*register_name)(rm),
- disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
- return mod == 2 ? 5 : 2;
}
- break;
+ // No sib.
+ int disp = mod == 2 ? Imm32(modrmp + 1) : Imm8(modrmp + 1);
+ AppendToBuffer("[%s%s0x%x]", (this->*register_name)(rm),
+ disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
+ return mod == 2 ? 5 : 2;
+ }
case 3:
AppendToBuffer("%s", (this->*register_name)(rm));
return 1;
@@ -789,6 +790,15 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSE_AVX_RM_DIS_CASE)
SSE4_RM_INSTRUCTION_LIST(DECLARE_SSE_AVX_RM_DIS_CASE)
#undef DECLARE_SSE_AVX_RM_DIS_CASE
+
+#define DISASSEMBLE_AVX2_BROADCAST(instruction, _1, _2, _3, code) \
+ case 0x##code: \
+ AppendToBuffer("" #instruction " %s,", NameOfXMMRegister(regop)); \
+ current += PrintRightXMMOperand(current); \
+ break;
+ AVX2_BROADCAST_LIST(DISASSEMBLE_AVX2_BROADCAST)
+#undef DISASSEMBLE_AVX2_BROADCAST
+
default:
UnimplementedInstruction();
}
@@ -1243,12 +1253,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
break;
case 0xC2: {
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
- "neq", "nlt", "nle", "ord"};
AppendToBuffer("vcmpps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
- AppendToBuffer(", (%s)", pseudo_op[*current]);
+ AppendToBuffer(", (%s)", cmp_pseudo_op[*current]);
current++;
break;
}
@@ -1371,11 +1379,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
case 0xC2: {
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord", "neq"};
AppendToBuffer("vcmppd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
- AppendToBuffer(", (%s)", pseudo_op[*current]);
+ AppendToBuffer(", (%s)", cmp_pseudo_op[*current]);
current++;
break;
}
@@ -1999,11 +2006,9 @@ int DisassemblerIA32::InstructionDecode(v8::base::Vector<char> out_buffer,
data += PrintOperands("xadd", OPER_REG_OP_ORDER, data);
} else if (f0byte == 0xC2) {
data += 2;
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
- "neq", "nlt", "nle", "ord"};
AppendToBuffer("cmpps %s, ", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(", (%s)", pseudo_op[*data]);
+ AppendToBuffer(", (%s)", cmp_pseudo_op[*data]);
data++;
} else if (f0byte == 0xC6) {
// shufps xmm, xmm/m128, imm8
@@ -2485,10 +2490,9 @@ int DisassemblerIA32::InstructionDecode(v8::base::Vector<char> out_buffer,
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord", "neq"};
AppendToBuffer("cmppd %s, ", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(", (%s)", pseudo_op[*data]);
+ AppendToBuffer(", (%s)", cmp_pseudo_op[*data]);
data++;
} else if (*data == 0xC4) {
data++;
@@ -2694,10 +2698,7 @@ int DisassemblerIA32::InstructionDecode(v8::base::Vector<char> out_buffer,
data += PrintRightXMMOperand(data);
} else if (b2 == 0xC2) {
// Intel manual 2A, Table 3-18.
- const char* const pseudo_op[] = {
- "cmpeqsd", "cmpltsd", "cmplesd", "cmpunordsd",
- "cmpneqsd", "cmpnltsd", "cmpnlesd", "cmpordsd"};
- AppendToBuffer("%s %s,%s", pseudo_op[data[1]],
+ AppendToBuffer("cmp%ssd %s,%s", cmp_pseudo_op[data[1]],
NameOfXMMRegister(regop), NameOfXMMRegister(rm));
data += 2;
} else {
@@ -2835,10 +2836,7 @@ int DisassemblerIA32::InstructionDecode(v8::base::Vector<char> out_buffer,
data += PrintRightXMMOperand(data);
} else if (b2 == 0xC2) {
// Intel manual 2A, Table 3-18.
- const char* const pseudo_op[] = {
- "cmpeqss", "cmpltss", "cmpless", "cmpunordss",
- "cmpneqss", "cmpnltss", "cmpnless", "cmpordss"};
- AppendToBuffer("%s %s,%s", pseudo_op[data[1]],
+ AppendToBuffer("cmp%sss %s,%s", cmp_pseudo_op[data[1]],
NameOfXMMRegister(regop), NameOfXMMRegister(rm));
data += 2;
} else {
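
With the shared cmp_pseudo_op table, the compare immediate byte now indexes one predicate-name list for cmpps/cmppd and cmpsd/cmpss (including their VEX forms) instead of several partial per-site arrays. A standalone sketch of how the table is consumed, not the disassembler code itself:

    #include <cstdio>

    static const char* const cmp_pseudo_op[16] = {
        "eq",    "lt",  "le",  "unord", "neq",    "nlt", "nle", "ord",
        "eq_uq", "nge", "ngt", "false", "neq_oq", "ge",  "gt",  "true"};

    int main() {
      char buf[32];
      // Scalar double compare with immediate 1 disassembles as "cmpltsd ...".
      std::snprintf(buf, sizeof(buf), "cmp%ssd", cmp_pseudo_op[1]);
      std::printf("%s\n", buf);                    // cmpltsd
      // Packed compares print the predicate in parentheses, e.g. vcmpps ..., (ge).
      std::printf("(%s)\n", cmp_pseudo_op[0x0d]);  // (ge)
      return 0;
    }
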
diff --git a/deps/v8/src/diagnostics/loong64/disasm-loong64.cc b/deps/v8/src/diagnostics/loong64/disasm-loong64.cc
new file mode 100644
index 0000000000..1c41a3896a
--- /dev/null
+++ b/deps/v8/src/diagnostics/loong64/disasm-loong64.cc
@@ -0,0 +1,1697 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/base/platform/platform.h"
+#include "src/base/strings.h"
+#include "src/base/vector.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disasm.h"
+
+namespace v8 {
+namespace internal {
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// a more informative description.
+class Decoder {
+ public:
+ Decoder(const disasm::NameConverter& converter,
+ v8::base::Vector<char> out_buffer)
+ : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
+ out_buffer_[out_buffer_pos_] = '\0';
+ }
+
+ ~Decoder() {}
+
+ Decoder(const Decoder&) = delete;
+ Decoder& operator=(const Decoder&) = delete;
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(byte* instruction);
+
+ private:
+ // Bottleneck functions to print into the out_buffer.
+ void PrintChar(const char ch);
+ void Print(const char* str);
+
+ // Printing of common values.
+ void PrintRegister(int reg);
+ void PrintFPURegister(int freg);
+ void PrintFPUStatusRegister(int freg);
+ void PrintRj(Instruction* instr);
+ void PrintRk(Instruction* instr);
+ void PrintRd(Instruction* instr);
+ void PrintFj(Instruction* instr);
+ void PrintFk(Instruction* instr);
+ void PrintFd(Instruction* instr);
+ void PrintFa(Instruction* instr);
+ void PrintSa2(Instruction* instr);
+ void PrintSa3(Instruction* instr);
+ void PrintUi5(Instruction* instr);
+ void PrintUi6(Instruction* instr);
+ void PrintUi12(Instruction* instr);
+ void PrintXi12(Instruction* instr);
+ void PrintMsbw(Instruction* instr);
+ void PrintLsbw(Instruction* instr);
+ void PrintMsbd(Instruction* instr);
+ void PrintLsbd(Instruction* instr);
+ // void PrintCond(Instruction* instr);
+ void PrintSi12(Instruction* instr);
+ void PrintSi14(Instruction* instr);
+ void PrintSi16(Instruction* instr);
+ void PrintSi20(Instruction* instr);
+ void PrintCj(Instruction* instr);
+ void PrintCd(Instruction* instr);
+ void PrintCa(Instruction* instr);
+ void PrintCode(Instruction* instr);
+ void PrintHint5(Instruction* instr);
+ void PrintHint15(Instruction* instr);
+ void PrintPCOffs16(Instruction* instr);
+ void PrintPCOffs21(Instruction* instr);
+ void PrintPCOffs26(Instruction* instr);
+ void PrintOffs16(Instruction* instr);
+ void PrintOffs21(Instruction* instr);
+ void PrintOffs26(Instruction* instr);
+
+ // Handle formatting of instructions and their options.
+ int FormatRegister(Instruction* instr, const char* option);
+ int FormatFPURegister(Instruction* instr, const char* option);
+ int FormatOption(Instruction* instr, const char* option);
+ void Format(Instruction* instr, const char* format);
+ void Unknown(Instruction* instr);
+ int DecodeBreakInstr(Instruction* instr);
+
+ // Each of these functions decodes one particular instruction type.
+ int InstructionDecode(Instruction* instr);
+ void DecodeTypekOp6(Instruction* instr);
+ void DecodeTypekOp7(Instruction* instr);
+ void DecodeTypekOp8(Instruction* instr);
+ void DecodeTypekOp10(Instruction* instr);
+ void DecodeTypekOp12(Instruction* instr);
+ void DecodeTypekOp14(Instruction* instr);
+ int DecodeTypekOp17(Instruction* instr);
+ void DecodeTypekOp22(Instruction* instr);
+
+ const disasm::NameConverter& converter_;
+ v8::base::Vector<char> out_buffer_;
+ int out_buffer_pos_;
+};
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+ (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+ char cur = *str++;
+ while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ PrintChar(cur);
+ cur = *str++;
+ }
+ out_buffer_[out_buffer_pos_] = 0;
+}
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+ Print(converter_.NameOfCPURegister(reg));
+}
+
+void Decoder::PrintRj(Instruction* instr) {
+ int reg = instr->RjValue();
+ PrintRegister(reg);
+}
+
+void Decoder::PrintRk(Instruction* instr) {
+ int reg = instr->RkValue();
+ PrintRegister(reg);
+}
+
+void Decoder::PrintRd(Instruction* instr) {
+ int reg = instr->RdValue();
+ PrintRegister(reg);
+}
+
+// Print the FPUregister name according to the active name converter.
+void Decoder::PrintFPURegister(int freg) {
+ Print(converter_.NameOfXMMRegister(freg));
+}
+
+void Decoder::PrintFj(Instruction* instr) {
+ int freg = instr->FjValue();
+ PrintFPURegister(freg);
+}
+
+void Decoder::PrintFk(Instruction* instr) {
+ int freg = instr->FkValue();
+ PrintFPURegister(freg);
+}
+
+void Decoder::PrintFd(Instruction* instr) {
+ int freg = instr->FdValue();
+ PrintFPURegister(freg);
+}
+
+void Decoder::PrintFa(Instruction* instr) {
+ int freg = instr->FaValue();
+ PrintFPURegister(freg);
+}
+
+// Print the integer value of the sa field.
+void Decoder::PrintSa2(Instruction* instr) {
+ int sa = instr->Sa2Value();
+ uint32_t opcode = (instr->InstructionBits() >> 18) << 18;
+ if (opcode == ALSL || opcode == ALSL_D) {
+ sa += 1;
+ }
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+void Decoder::PrintSa3(Instruction* instr) {
+ int sa = instr->Sa3Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+void Decoder::PrintUi5(Instruction* instr) {
+ int ui = instr->Ui5Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui);
+}
+
+void Decoder::PrintUi6(Instruction* instr) {
+ int ui = instr->Ui6Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui);
+}
+
+void Decoder::PrintUi12(Instruction* instr) {
+ int ui = instr->Ui12Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui);
+}
+
+void Decoder::PrintXi12(Instruction* instr) {
+ int xi = instr->Ui12Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", xi);
+}
+
+void Decoder::PrintMsbd(Instruction* instr) {
+ int msbd = instr->MsbdValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", msbd);
+}
+
+void Decoder::PrintLsbd(Instruction* instr) {
+ int lsbd = instr->LsbdValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", lsbd);
+}
+
+void Decoder::PrintMsbw(Instruction* instr) {
+ int msbw = instr->MsbwValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", msbw);
+}
+
+void Decoder::PrintLsbw(Instruction* instr) {
+ int lsbw = instr->LsbwValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", lsbw);
+}
+
+void Decoder::PrintSi12(Instruction* instr) {
+ int si = ((instr->Si12Value()) << (32 - kSi12Bits)) >> (32 - kSi12Bits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si);
+}
+
+void Decoder::PrintSi14(Instruction* instr) {
+ int si = ((instr->Si14Value()) << (32 - kSi14Bits)) >> (32 - kSi14Bits);
+ si <<= 2;
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si);
+}
+
+void Decoder::PrintSi16(Instruction* instr) {
+ int si = ((instr->Si16Value()) << (32 - kSi16Bits)) >> (32 - kSi16Bits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si);
+}
+
+void Decoder::PrintSi20(Instruction* instr) {
+ int si = ((instr->Si20Value()) << (32 - kSi20Bits)) >> (32 - kSi20Bits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si);
+}
+
+void Decoder::PrintCj(Instruction* instr) {
+ int cj = instr->CjValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", cj);
+}
+
+void Decoder::PrintCd(Instruction* instr) {
+ int cd = instr->CdValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", cd);
+}
+
+void Decoder::PrintCa(Instruction* instr) {
+ int ca = instr->CaValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ca);
+}
+
+void Decoder::PrintCode(Instruction* instr) {
+ int code = instr->CodeValue();
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", code, code);
+}
+
+void Decoder::PrintHint5(Instruction* instr) {
+ int hint = instr->Hint5Value();
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", hint, hint);
+}
+
+void Decoder::PrintHint15(Instruction* instr) {
+ int hint = instr->Hint15Value();
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", hint, hint);
+}
+
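+// The PrintPCOffs* helpers below resolve a branch offset to an absolute
+// target: the field counts instructions, so it is shifted left by two into a
+// byte offset, sign-extended, added to the address of the branch itself and
+// then rendered through the NameConverter.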
+void Decoder::PrintPCOffs16(Instruction* instr) {
+ int n_bits = 2;
+ int offs = instr->Offs16Value();
+ int target = ((offs << n_bits) << (32 - kOffsLowBits - n_bits)) >>
+ (32 - kOffsLowBits - n_bits);
+ out_buffer_pos_ += base::SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
+}
+
+void Decoder::PrintPCOffs21(Instruction* instr) {
+ int n_bits = 2;
+ int offs = instr->Offs21Value();
+ int target =
+ ((offs << n_bits) << (32 - kOffsLowBits - kOffs21HighBits - n_bits)) >>
+ (32 - kOffsLowBits - kOffs21HighBits - n_bits);
+ out_buffer_pos_ += base::SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
+}
+
+void Decoder::PrintPCOffs26(Instruction* instr) {
+ int n_bits = 2;
+ int offs = instr->Offs26Value();
+ int target =
+ ((offs << n_bits) << (32 - kOffsLowBits - kOffs26HighBits - n_bits)) >>
+ (32 - kOffsLowBits - kOffs26HighBits - n_bits);
+ out_buffer_pos_ += base::SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
+}
+
+void Decoder::PrintOffs16(Instruction* instr) {
+ int offs = instr->Offs16Value();
+ offs <<= (32 - kOffsLowBits);
+ offs >>= (32 - kOffsLowBits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
+}
+
+void Decoder::PrintOffs21(Instruction* instr) {
+ int offs = instr->Offs21Value();
+ offs <<= (32 - kOffsLowBits - kOffs21HighBits);
+ offs >>= (32 - kOffsLowBits - kOffs21HighBits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
+}
+
+void Decoder::PrintOffs26(Instruction* instr) {
+ int offs = instr->Offs26Value();
+ offs <<= (32 - kOffsLowBits - kOffs26HighBits);
+ offs >>= (32 - kOffsLowBits - kOffs26HighBits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
+}
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+ DCHECK_EQ(format[0], 'r');
+ if (format[1] == 'j') { // 'rj: Rj register.
+ int reg = instr->RjValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'k') { // 'rk: rk register.
+ int reg = instr->RkValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'rd: rd register.
+ int reg = instr->RdValue();
+ PrintRegister(reg);
+ return 2;
+ }
+ UNREACHABLE();
+}
+
+// Handle all FPU register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
+ DCHECK_EQ(format[0], 'f');
+ if (format[1] == 'j') { // 'fj: fj register.
+ int reg = instr->FjValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'k') { // 'fk: fk register.
+ int reg = instr->FkValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->FdValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'a') { // 'fa: fa register.
+ int reg = instr->FaValue();
+ PrintFPURegister(reg);
+ return 2;
+ }
+ UNREACHABLE();
+}
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instruction. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.) FormatOption returns the number of
+// characters that were consumed from the formatting string.
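+//
+// For example (illustrative only), an ADDI.W decoded with the format string
+// "addi.w 'rd, 'rj, 'si12" has its literal text copied as-is while the
+// escaped options 'rd, 'rj and 'si12 are replaced with the destination
+// register, the source register and the sign-extended 12-bit immediate, so
+// the output might read something like "addi.w a0, a1, -16" (the exact
+// register names come from the active NameConverter).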
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'c': {
+ switch (format[1]) {
+ case 'a':
+ DCHECK(STRING_STARTS_WITH(format, "ca"));
+ PrintCa(instr);
+ return 2;
+ case 'd':
+ DCHECK(STRING_STARTS_WITH(format, "cd"));
+ PrintCd(instr);
+ return 2;
+ case 'j':
+ DCHECK(STRING_STARTS_WITH(format, "cj"));
+ PrintCj(instr);
+ return 2;
+ case 'o':
+ DCHECK(STRING_STARTS_WITH(format, "code"));
+ PrintCode(instr);
+ return 4;
+ }
+ break;
+ }
+ case 'f': {
+ return FormatFPURegister(instr, format);
+ }
+ case 'h': {
+ if (format[4] == '5') {
+ DCHECK(STRING_STARTS_WITH(format, "hint5"));
+ PrintHint5(instr);
+ return 5;
+ } else if (format[4] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "hint15"));
+ PrintHint15(instr);
+ return 6;
+ }
+ break;
+ }
+ case 'l': {
+ switch (format[3]) {
+ case 'w':
+ DCHECK(STRING_STARTS_WITH(format, "lsbw"));
+ PrintLsbw(instr);
+ return 4;
+ case 'd':
+ DCHECK(STRING_STARTS_WITH(format, "lsbd"));
+ PrintLsbd(instr);
+ return 4;
+ default:
+ return 0;
+ }
+ }
+ case 'm': {
+ if (format[3] == 'w') {
+ DCHECK(STRING_STARTS_WITH(format, "msbw"));
+ PrintMsbw(instr);
+ } else if (format[3] == 'd') {
+ DCHECK(STRING_STARTS_WITH(format, "msbd"));
+ PrintMsbd(instr);
+ }
+ return 4;
+ }
+ case 'o': {
+ if (format[1] == 'f') {
+ if (format[4] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "offs16"));
+ PrintOffs16(instr);
+ return 6;
+ } else if (format[4] == '2') {
+ if (format[5] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "offs21"));
+ PrintOffs21(instr);
+ return 6;
+ } else if (format[5] == '6') {
+ DCHECK(STRING_STARTS_WITH(format, "offs26"));
+ PrintOffs26(instr);
+ return 6;
+ }
+ }
+ }
+ break;
+ }
+ case 'p': {
+ if (format[6] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "pcoffs16"));
+ PrintPCOffs16(instr);
+ return 8;
+ } else if (format[6] == '2') {
+ if (format[7] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "pcoffs21"));
+ PrintPCOffs21(instr);
+ return 8;
+ } else if (format[7] == '6') {
+ DCHECK(STRING_STARTS_WITH(format, "pcoffs26"));
+ PrintPCOffs26(instr);
+ return 8;
+ }
+ }
+ break;
+ }
+ case 'r': {
+ return FormatRegister(instr, format);
+ }
+ case 's': {
+ switch (format[1]) {
+ case 'a':
+ if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "sa2"));
+ PrintSa2(instr);
+ } else if (format[2] == '3') {
+ DCHECK(STRING_STARTS_WITH(format, "sa3"));
+ PrintSa3(instr);
+ }
+ return 3;
+ case 'i':
+ if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "si20"));
+ PrintSi20(instr);
+ return 4;
+ } else if (format[2] == '1') {
+ switch (format[3]) {
+ case '2':
+ DCHECK(STRING_STARTS_WITH(format, "si12"));
+ PrintSi12(instr);
+ return 4;
+ case '4':
+ DCHECK(STRING_STARTS_WITH(format, "si14"));
+ PrintSi14(instr);
+ return 4;
+ case '6':
+ DCHECK(STRING_STARTS_WITH(format, "si16"));
+ PrintSi16(instr);
+ return 4;
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ case 'u': {
+ if (format[2] == '5') {
+ DCHECK(STRING_STARTS_WITH(format, "ui5"));
+ PrintUi5(instr);
+ return 3;
+ } else if (format[2] == '6') {
+ DCHECK(STRING_STARTS_WITH(format, "ui6"));
+ PrintUi6(instr);
+ return 3;
+ } else if (format[2] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "ui12"));
+ PrintUi12(instr);
+ return 4;
+ }
+ break;
+ }
+ case 'x': {
+ DCHECK(STRING_STARTS_WITH(format, "xi12"));
+ PrintXi12(instr);
+ return 4;
+ }
+ default:
+ UNREACHABLE();
+ }
+ return 0;
+}
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instruction* instr, const char* format) {
+ char cur = *format++;
+ while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ if (cur == '\'') { // Single quote is used as the formatting escape.
+ format += FormatOption(instr, format);
+ } else {
+ out_buffer_[out_buffer_pos_++] = cur;
+ }
+ cur = *format++;
+ }
+ out_buffer_[out_buffer_pos_] = '\0';
+}
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr)
+// which will just print "unknown" in place of a disassembly of the
+// instruction bits.
+void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
+
+int Decoder::DecodeBreakInstr(Instruction* instr) {
+ // This is already known to be BREAK instr, just extract the code.
+ /*if (instr->Bits(14, 0) == static_cast<int>(kMaxStopCode)) {
+ // This is stop(msg).
+ Format(instr, "break, code: 'code");
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "\n%p %08" PRIx64,
+ static_cast<void*>(reinterpret_cast<int32_t*>(instr + kInstrSize)),
+ reinterpret_cast<uint64_t>(
+ *reinterpret_cast<char**>(instr + kInstrSize)));
+ // Size 3: the break_ instr, plus embedded 64-bit char pointer.
+ return 3 * kInstrSize;
+ } else {
+ Format(instr, "break, code: 'code");
+ return kInstrSize;
+ }*/
+ Format(instr, "break code: 'code");
+ return kInstrSize;
+}
+
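+// Each DecodeTypekOp* handler below isolates the opcode by extracting the
+// top bits of the word and shifting them back into place, e.g.
+// instr->Bits(31, 26) << 26 keeps only the 6-bit major opcode, which can then
+// be compared directly against the opcode constants defined at their
+// in-instruction bit positions.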
+void Decoder::DecodeTypekOp6(Instruction* instr) {
+ switch (instr->Bits(31, 26) << 26) {
+ case ADDU16I_D:
+ Format(instr, "addu16i.d 'rd, 'rj, 'si16");
+ break;
+ case BEQZ:
+ Format(instr, "beqz 'rj, 'offs21 -> 'pcoffs21");
+ break;
+ case BNEZ:
+ Format(instr, "bnez 'rj, 'offs21 -> 'pcoffs21");
+ break;
+ case BCZ:
+ if (instr->Bit(8))
+ Format(instr, "bcnez fcc'cj, 'offs21 -> 'pcoffs21");
+ else
+ Format(instr, "bceqz fcc'cj, 'offs21 -> 'pcoffs21");
+ break;
+ case JIRL:
+ Format(instr, "jirl 'rd, 'rj, 'offs16");
+ break;
+ case B:
+ Format(instr, "b 'offs26 -> 'pcoffs26");
+ break;
+ case BL:
+ Format(instr, "bl 'offs26 -> 'pcoffs26");
+ break;
+ case BEQ:
+ Format(instr, "beq 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BNE:
+ Format(instr, "bne 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BLT:
+ Format(instr, "blt 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BGE:
+ Format(instr, "bge 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BLTU:
+ Format(instr, "bltu 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BGEU:
+ Format(instr, "bgeu 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Decoder::DecodeTypekOp7(Instruction* instr) {
+ switch (instr->Bits(31, 25) << 25) {
+ case LU12I_W:
+ Format(instr, "lu12i.w 'rd, 'si20");
+ break;
+ case LU32I_D:
+ Format(instr, "lu32i.d 'rd, 'si20");
+ break;
+ case PCADDI:
+ Format(instr, "pcaddi 'rd, 'si20");
+ break;
+ case PCALAU12I:
+ Format(instr, "pcalau12i 'rd, 'si20");
+ break;
+ case PCADDU12I:
+ Format(instr, "pcaddu12i 'rd, 'si20");
+ break;
+ case PCADDU18I:
+ Format(instr, "pcaddu18i 'rd, 'si20");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
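+// The Op8-type memory instructions below (ldptr/stptr/ll/sc) use the si14
+// immediate, which PrintSi14 shifts left by two before printing, so the
+// displayed offset is already scaled (presumably to bytes) rather than the
+// raw field value.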
+void Decoder::DecodeTypekOp8(Instruction* instr) {
+ switch (instr->Bits(31, 24) << 24) {
+ case LDPTR_W:
+ Format(instr, "ldptr.w 'rd, 'rj, 'si14");
+ break;
+ case STPTR_W:
+ Format(instr, "stptr.w 'rd, 'rj, 'si14");
+ break;
+ case LDPTR_D:
+ Format(instr, "ldptr.d 'rd, 'rj, 'si14");
+ break;
+ case STPTR_D:
+ Format(instr, "stptr.d 'rd, 'rj, 'si14");
+ break;
+ case LL_W:
+ Format(instr, "ll.w 'rd, 'rj, 'si14");
+ break;
+ case SC_W:
+ Format(instr, "sc.w 'rd, 'rj, 'si14");
+ break;
+ case LL_D:
+ Format(instr, "ll.d 'rd, 'rj, 'si14");
+ break;
+ case SC_D:
+ Format(instr, "sc.d 'rd, 'rj, 'si14");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
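+// In the Op10 handler below, the logical immediates (andi/ori/xori) are
+// formatted with 'xi12, which prints the 12-bit field zero-extended in hex,
+// while the arithmetic and memory forms use 'si12, which prints it
+// sign-extended in decimal.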
+void Decoder::DecodeTypekOp10(Instruction* instr) {
+ switch (instr->Bits(31, 22) << 22) {
+ case BSTR_W: {
+ if (instr->Bit(21) != 0) {
+ if (instr->Bit(15) == 0) {
+ Format(instr, "bstrins.w 'rd, 'rj, 'msbw, 'lsbw");
+ } else {
+ Format(instr, "bstrpick.w 'rd, 'rj, 'msbw, 'lsbw");
+ }
+ }
+ break;
+ }
+ case BSTRINS_D:
+ Format(instr, "bstrins.d 'rd, 'rj, 'msbd, 'lsbd");
+ break;
+ case BSTRPICK_D:
+ Format(instr, "bstrpick.d 'rd, 'rj, 'msbd, 'lsbd");
+ break;
+ case SLTI:
+ Format(instr, "slti 'rd, 'rj, 'si12");
+ break;
+ case SLTUI:
+ Format(instr, "sltui 'rd, 'rj, 'si12");
+ break;
+ case ADDI_W:
+ Format(instr, "addi.w 'rd, 'rj, 'si12");
+ break;
+ case ADDI_D:
+ Format(instr, "addi.d 'rd, 'rj, 'si12");
+ break;
+ case LU52I_D:
+ Format(instr, "lu52i.d 'rd, 'rj, 'si12");
+ break;
+ case ANDI:
+ Format(instr, "andi 'rd, 'rj, 'xi12");
+ break;
+ case ORI:
+ Format(instr, "ori 'rd, 'rj, 'xi12");
+ break;
+ case XORI:
+ Format(instr, "xori 'rd, 'rj, 'xi12");
+ break;
+ case LD_B:
+ Format(instr, "ld.b 'rd, 'rj, 'si12");
+ break;
+ case LD_H:
+ Format(instr, "ld.h 'rd, 'rj, 'si12");
+ break;
+ case LD_W:
+ Format(instr, "ld.w 'rd, 'rj, 'si12");
+ break;
+ case LD_D:
+ Format(instr, "ld.d 'rd, 'rj, 'si12");
+ break;
+ case ST_B:
+ Format(instr, "st.b 'rd, 'rj, 'si12");
+ break;
+ case ST_H:
+ Format(instr, "st.h 'rd, 'rj, 'si12");
+ break;
+ case ST_W:
+ Format(instr, "st.w 'rd, 'rj, 'si12");
+ break;
+ case ST_D:
+ Format(instr, "st.d 'rd, 'rj, 'si12");
+ break;
+ case LD_BU:
+ Format(instr, "ld.bu 'rd, 'rj, 'si12");
+ break;
+ case LD_HU:
+ Format(instr, "ld.hu 'rd, 'rj, 'si12");
+ break;
+ case LD_WU:
+ Format(instr, "ld.wu 'rd, 'rj, 'si12");
+ break;
+ case FLD_S:
+ Format(instr, "fld.s 'fd, 'rj, 'si12");
+ break;
+ case FST_S:
+ Format(instr, "fst.s 'fd, 'rj, 'si12");
+ break;
+ case FLD_D:
+ Format(instr, "fld.d 'fd, 'rj, 'si12");
+ break;
+ case FST_D:
+ Format(instr, "fst.d 'fd, 'rj, 'si12");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Decoder::DecodeTypekOp12(Instruction* instr) {
+ switch (instr->Bits(31, 20) << 20) {
+ case FMADD_S:
+ Format(instr, "fmadd.s 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FMADD_D:
+ Format(instr, "fmadd.d 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FMSUB_S:
+ Format(instr, "fmsub.s 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FMSUB_D:
+ Format(instr, "fmsub.d 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FNMADD_S:
+ Format(instr, "fnmadd.s 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FNMADD_D:
+ Format(instr, "fnmadd.d 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FNMSUB_S:
+ Format(instr, "fnmsub.s 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FNMSUB_D:
+ Format(instr, "fnmsub.d 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FCMP_COND_S:
+ switch (instr->Bits(19, 15)) {
+ case CAF:
+ Format(instr, "fcmp.caf.s fcc'cd, 'fj, 'fk");
+ break;
+ case SAF:
+ Format(instr, "fcmp.saf.s fcc'cd, 'fj, 'fk");
+ break;
+ case CLT:
+ Format(instr, "fcmp.clt.s fcc'cd, 'fj, 'fk");
+ break;
+ case CEQ:
+ Format(instr, "fcmp.ceq.s fcc'cd, 'fj, 'fk");
+ break;
+ case SEQ:
+ Format(instr, "fcmp.seq.s fcc'cd, 'fj, 'fk");
+ break;
+ case CLE:
+ Format(instr, "fcmp.cle.s fcc'cd, 'fj, 'fk");
+ break;
+ case SLE:
+ Format(instr, "fcmp.sle.s fcc'cd, 'fj, 'fk");
+ break;
+ case CUN:
+ Format(instr, "fcmp.cun.s fcc'cd, 'fj, 'fk");
+ break;
+ case SUN:
+ Format(instr, "fcmp.sun.s fcc'cd, 'fj, 'fk");
+ break;
+ case CULT:
+ Format(instr, "fcmp.cult.s fcc'cd, 'fj, 'fk");
+ break;
+ case SULT:
+ Format(instr, "fcmp.sult.s fcc'cd, 'fj, 'fk");
+ break;
+ case CUEQ:
+ Format(instr, "fcmp.cueq.s fcc'cd, 'fj, 'fk");
+ break;
+ case SUEQ:
+ Format(instr, "fcmp.sueq.s fcc'cd, 'fj, 'fk");
+ break;
+ case CULE:
+ Format(instr, "fcmp.cule.s fcc'cd, 'fj, 'fk");
+ break;
+ case SULE:
+ Format(instr, "fcmp.sule.s fcc'cd, 'fj, 'fk");
+ break;
+ case CNE:
+ Format(instr, "fcmp.cne.s fcc'cd, 'fj, 'fk");
+ break;
+ case SNE:
+ Format(instr, "fcmp.sne.s fcc'cd, 'fj, 'fk");
+ break;
+ case COR:
+ Format(instr, "fcmp.cor.s fcc'cd, 'fj, 'fk");
+ break;
+ case SOR:
+ Format(instr, "fcmp.sor.s fcc'cd, 'fj, 'fk");
+ break;
+ case CUNE:
+ Format(instr, "fcmp.cune.s fcc'cd, 'fj, 'fk");
+ break;
+ case SUNE:
+ Format(instr, "fcmp.sune.s fcc'cd, 'fj, 'fk");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case FCMP_COND_D:
+ switch (instr->Bits(19, 15)) {
+ case CAF:
+ Format(instr, "fcmp.caf.d fcc'cd, 'fj, 'fk");
+ break;
+ case SAF:
+ Format(instr, "fcmp.saf.d fcc'cd, 'fj, 'fk");
+ break;
+ case CLT:
+ Format(instr, "fcmp.clt.d fcc'cd, 'fj, 'fk");
+ break;
+ case CEQ:
+ Format(instr, "fcmp.ceq.d fcc'cd, 'fj, 'fk");
+ break;
+ case SEQ:
+ Format(instr, "fcmp.seq.d fcc'cd, 'fj, 'fk");
+ break;
+ case CLE:
+ Format(instr, "fcmp.cle.d fcc'cd, 'fj, 'fk");
+ break;
+ case SLE:
+ Format(instr, "fcmp.sle.d fcc'cd, 'fj, 'fk");
+ break;
+ case CUN:
+ Format(instr, "fcmp.cun.d fcc'cd, 'fj, 'fk");
+ break;
+ case SUN:
+ Format(instr, "fcmp.sun.d fcc'cd, 'fj, 'fk");
+ break;
+ case CULT:
+ Format(instr, "fcmp.cult.d fcc'cd, 'fj, 'fk");
+ break;
+ case SULT:
+ Format(instr, "fcmp.sult.d fcc'cd, 'fj, 'fk");
+ break;
+ case CUEQ:
+ Format(instr, "fcmp.cueq.d fcc'cd, 'fj, 'fk");
+ break;
+ case SUEQ:
+ Format(instr, "fcmp.sueq.d fcc'cd, 'fj, 'fk");
+ break;
+ case CULE:
+ Format(instr, "fcmp.cule.d fcc'cd, 'fj, 'fk");
+ break;
+ case SULE:
+ Format(instr, "fcmp.sule.d fcc'cd, 'fj, 'fk");
+ break;
+ case CNE:
+ Format(instr, "fcmp.cne.d fcc'cd, 'fj, 'fk");
+ break;
+ case SNE:
+ Format(instr, "fcmp.sne.d fcc'cd, 'fj, 'fk");
+ break;
+ case COR:
+ Format(instr, "fcmp.cor.d fcc'cd, 'fj, 'fk");
+ break;
+ case SOR:
+ Format(instr, "fcmp.sor.d fcc'cd, 'fj, 'fk");
+ break;
+ case CUNE:
+ Format(instr, "fcmp.cune.d fcc'cd, 'fj, 'fk");
+ break;
+ case SUNE:
+ Format(instr, "fcmp.sune.d fcc'cd, 'fj, 'fk");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case FSEL:
+ Format(instr, "fsel 'fd, 'fj, 'fk, fcc'ca");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Decoder::DecodeTypekOp14(Instruction* instr) {
+ switch (instr->Bits(31, 18) << 18) {
+ case ALSL:
+ if (instr->Bit(17))
+ Format(instr, "alsl.wu 'rd, 'rj, 'rk, 'sa2");
+ else
+ Format(instr, "alsl.w 'rd, 'rj, 'rk, 'sa2");
+ break;
+ case BYTEPICK_W:
+ Format(instr, "bytepick.w 'rd, 'rj, 'rk, 'sa2");
+ break;
+ case BYTEPICK_D:
+ Format(instr, "bytepick.d 'rd, 'rj, 'rk, 'sa3");
+ break;
+ case ALSL_D:
+ Format(instr, "alsl.d 'rd, 'rj, 'rk, 'sa2");
+ break;
+ case SLLI:
+ if (instr->Bit(16))
+ Format(instr, "slli.d 'rd, 'rj, 'ui6");
+ else
+ Format(instr, "slli.w 'rd, 'rj, 'ui5");
+ break;
+ case SRLI:
+ if (instr->Bit(16))
+ Format(instr, "srli.d 'rd, 'rj, 'ui6");
+ else
+ Format(instr, "srli.w 'rd, 'rj, 'ui5");
+ break;
+ case SRAI:
+ if (instr->Bit(16))
+ Format(instr, "srai.d 'rd, 'rj, 'ui6");
+ else
+ Format(instr, "srai.w 'rd, 'rj, 'ui5");
+ break;
+ case ROTRI:
+ if (instr->Bit(16))
+ Format(instr, "rotri.d 'rd, 'rj, 'ui6");
+ else
+ Format(instr, "rotri.w 'rd, 'rj, 'ui5");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
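+// Unlike the other handlers, this one returns the number of bytes consumed
+// so that the BREAK case can eventually report a larger size; see the
+// commented-out sketch in DecodeBreakInstr above.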
+int Decoder::DecodeTypekOp17(Instruction* instr) {
+ switch (instr->Bits(31, 15) << 15) {
+ case ADD_W:
+ Format(instr, "add.w 'rd, 'rj, 'rk");
+ break;
+ case ADD_D:
+ Format(instr, "add.d 'rd, 'rj, 'rk");
+ break;
+ case SUB_W:
+ Format(instr, "sub.w 'rd, 'rj, 'rk");
+ break;
+ case SUB_D:
+ Format(instr, "sub.d 'rd, 'rj, 'rk");
+ break;
+ case SLT:
+ Format(instr, "slt 'rd, 'rj, 'rk");
+ break;
+ case SLTU:
+ Format(instr, "sltu 'rd, 'rj, 'rk");
+ break;
+ case MASKEQZ:
+ Format(instr, "maskeqz 'rd, 'rj, 'rk");
+ break;
+ case MASKNEZ:
+ Format(instr, "masknez 'rd, 'rj, 'rk");
+ break;
+ case NOR:
+ Format(instr, "nor 'rd, 'rj, 'rk");
+ break;
+ case AND:
+ Format(instr, "and 'rd, 'rj, 'rk");
+ break;
+ case OR:
+ Format(instr, "or 'rd, 'rj, 'rk");
+ break;
+ case XOR:
+ Format(instr, "xor 'rd, 'rj, 'rk");
+ break;
+ case ORN:
+ Format(instr, "orn 'rd, 'rj, 'rk");
+ break;
+ case ANDN:
+ Format(instr, "andn 'rd, 'rj, 'rk");
+ break;
+ case SLL_W:
+ Format(instr, "sll.w 'rd, 'rj, 'rk");
+ break;
+ case SRL_W:
+ Format(instr, "srl.w 'rd, 'rj, 'rk");
+ break;
+ case SRA_W:
+ Format(instr, "sra.w 'rd, 'rj, 'rk");
+ break;
+ case SLL_D:
+ Format(instr, "sll.d 'rd, 'rj, 'rk");
+ break;
+ case SRL_D:
+ Format(instr, "srl.d 'rd, 'rj, 'rk");
+ break;
+ case SRA_D:
+ Format(instr, "sra.d 'rd, 'rj, 'rk");
+ break;
+ case ROTR_D:
+ Format(instr, "rotr.d 'rd, 'rj, 'rk");
+ break;
+ case ROTR_W:
+ Format(instr, "rotr.w 'rd, 'rj, 'rk");
+ break;
+ case MUL_W:
+ Format(instr, "mul.w 'rd, 'rj, 'rk");
+ break;
+ case MULH_W:
+ Format(instr, "mulh.w 'rd, 'rj, 'rk");
+ break;
+ case MULH_WU:
+ Format(instr, "mulh.wu 'rd, 'rj, 'rk");
+ break;
+ case MUL_D:
+ Format(instr, "mul.d 'rd, 'rj, 'rk");
+ break;
+ case MULH_D:
+ Format(instr, "mulh.d 'rd, 'rj, 'rk");
+ break;
+ case MULH_DU:
+ Format(instr, "mulh.du 'rd, 'rj, 'rk");
+ break;
+ case MULW_D_W:
+ Format(instr, "mulw.d.w 'rd, 'rj, 'rk");
+ break;
+ case MULW_D_WU:
+ Format(instr, "mulw.d.wu 'rd, 'rj, 'rk");
+ break;
+ case DIV_W:
+ Format(instr, "div.w 'rd, 'rj, 'rk");
+ break;
+ case MOD_W:
+ Format(instr, "mod.w 'rd, 'rj, 'rk");
+ break;
+ case DIV_WU:
+ Format(instr, "div.wu 'rd, 'rj, 'rk");
+ break;
+ case MOD_WU:
+ Format(instr, "mod.wu 'rd, 'rj, 'rk");
+ break;
+ case DIV_D:
+ Format(instr, "div.d 'rd, 'rj, 'rk");
+ break;
+ case MOD_D:
+ Format(instr, "mod.d 'rd, 'rj, 'rk");
+ break;
+ case DIV_DU:
+ Format(instr, "div.du 'rd, 'rj, 'rk");
+ break;
+ case MOD_DU:
+ Format(instr, "mod.du 'rd, 'rj, 'rk");
+ break;
+ case BREAK:
+ return DecodeBreakInstr(instr);
+ case FADD_S:
+ Format(instr, "fadd.s 'fd, 'fj, 'fk");
+ break;
+ case FADD_D:
+ Format(instr, "fadd.d 'fd, 'fj, 'fk");
+ break;
+ case FSUB_S:
+ Format(instr, "fsub.s 'fd, 'fj, 'fk");
+ break;
+ case FSUB_D:
+ Format(instr, "fsub.d 'fd, 'fj, 'fk");
+ break;
+ case FMUL_S:
+ Format(instr, "fmul.s 'fd, 'fj, 'fk");
+ break;
+ case FMUL_D:
+ Format(instr, "fmul.d 'fd, 'fj, 'fk");
+ break;
+ case FDIV_S:
+ Format(instr, "fdiv.s 'fd, 'fj, 'fk");
+ break;
+ case FDIV_D:
+ Format(instr, "fdiv.d 'fd, 'fj, 'fk");
+ break;
+ case FMAX_S:
+ Format(instr, "fmax.s 'fd, 'fj, 'fk");
+ break;
+ case FMAX_D:
+ Format(instr, "fmax.d 'fd, 'fj, 'fk");
+ break;
+ case FMIN_S:
+ Format(instr, "fmin.s 'fd, 'fj, 'fk");
+ break;
+ case FMIN_D:
+ Format(instr, "fmin.d 'fd, 'fj, 'fk");
+ break;
+ case FMAXA_S:
+ Format(instr, "fmaxa.s 'fd, 'fj, 'fk");
+ break;
+ case FMAXA_D:
+ Format(instr, "fmaxa.d 'fd, 'fj, 'fk");
+ break;
+ case FMINA_S:
+ Format(instr, "fmina.s 'fd, 'fj, 'fk");
+ break;
+ case FMINA_D:
+ Format(instr, "fmina.d 'fd, 'fj, 'fk");
+ break;
+ case LDX_B:
+ Format(instr, "ldx.b 'rd, 'rj, 'rk");
+ break;
+ case LDX_H:
+ Format(instr, "ldx.h 'rd, 'rj, 'rk");
+ break;
+ case LDX_W:
+ Format(instr, "ldx.w 'rd, 'rj, 'rk");
+ break;
+ case LDX_D:
+ Format(instr, "ldx.d 'rd, 'rj, 'rk");
+ break;
+ case STX_B:
+ Format(instr, "stx.b 'rd, 'rj, 'rk");
+ break;
+ case STX_H:
+ Format(instr, "stx.h 'rd, 'rj, 'rk");
+ break;
+ case STX_W:
+ Format(instr, "stx.w 'rd, 'rj, 'rk");
+ break;
+ case STX_D:
+ Format(instr, "stx.d 'rd, 'rj, 'rk");
+ break;
+ case LDX_BU:
+ Format(instr, "ldx.bu 'rd, 'rj, 'rk");
+ break;
+ case LDX_HU:
+ Format(instr, "ldx.hu 'rd, 'rj, 'rk");
+ break;
+ case LDX_WU:
+ Format(instr, "ldx.wu 'rd, 'rj, 'rk");
+ break;
+ case FLDX_S:
+ Format(instr, "fldx.s 'fd, 'rj, 'rk");
+ break;
+ case FLDX_D:
+ Format(instr, "fldx.d 'fd, 'rj, 'rk");
+ break;
+ case FSTX_S:
+ Format(instr, "fstx.s 'fd, 'rj, 'rk");
+ break;
+ case FSTX_D:
+ Format(instr, "fstx.d 'fd, 'rj, 'rk");
+ break;
+ case AMSWAP_W:
+ Format(instr, "amswap.w 'rd, 'rk, 'rj");
+ break;
+ case AMSWAP_D:
+ Format(instr, "amswap.d 'rd, 'rk, 'rj");
+ break;
+ case AMADD_W:
+ Format(instr, "amadd.w 'rd, 'rk, 'rj");
+ break;
+ case AMADD_D:
+ Format(instr, "amadd.d 'rd, 'rk, 'rj");
+ break;
+ case AMAND_W:
+ Format(instr, "amand.w 'rd, 'rk, 'rj");
+ break;
+ case AMAND_D:
+ Format(instr, "amand.d 'rd, 'rk, 'rj");
+ break;
+ case AMOR_W:
+ Format(instr, "amor.w 'rd, 'rk, 'rj");
+ break;
+ case AMOR_D:
+ Format(instr, "amor.d 'rd, 'rk, 'rj");
+ break;
+ case AMXOR_W:
+ Format(instr, "amxor.w 'rd, 'rk, 'rj");
+ break;
+ case AMXOR_D:
+ Format(instr, "amxor.d 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_W:
+ Format(instr, "ammax.w 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_D:
+ Format(instr, "ammax.d 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_W:
+ Format(instr, "ammin.w 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_D:
+ Format(instr, "ammin.d 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_WU:
+ Format(instr, "ammax.wu 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DU:
+ Format(instr, "ammax.du 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_WU:
+ Format(instr, "ammin.wu 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DU:
+ Format(instr, "ammin.du 'rd, 'rk, 'rj");
+ break;
+ case AMSWAP_DB_W:
+ Format(instr, "amswap_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMSWAP_DB_D:
+ Format(instr, "amswap_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMADD_DB_W:
+ Format(instr, "amadd_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMADD_DB_D:
+ Format(instr, "amadd_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMAND_DB_W:
+ Format(instr, "amand_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMAND_DB_D:
+ Format(instr, "amand_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMOR_DB_W:
+ Format(instr, "amor_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMOR_DB_D:
+ Format(instr, "amor_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMXOR_DB_W:
+ Format(instr, "amxor_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMXOR_DB_D:
+ Format(instr, "amxor_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DB_W:
+ Format(instr, "ammax_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DB_D:
+ Format(instr, "ammax_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DB_W:
+ Format(instr, "ammin_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DB_D:
+ Format(instr, "ammin_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DB_WU:
+ Format(instr, "ammax_db.wu 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DB_DU:
+ Format(instr, "ammax_db.du 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DB_WU:
+ Format(instr, "ammin_db.wu 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DB_DU:
+ Format(instr, "ammin_db.du 'rd, 'rk, 'rj");
+ break;
+ case DBAR:
+ Format(instr, "dbar 'hint15");
+ break;
+ case IBAR:
+ Format(instr, "ibar 'hint15");
+ break;
+ case FSCALEB_S:
+ Format(instr, "fscaleb.s 'fd, 'fj, 'fk");
+ break;
+ case FSCALEB_D:
+ Format(instr, "fscaleb.d 'fd, 'fj, 'fk");
+ break;
+ case FCOPYSIGN_S:
+ Format(instr, "fcopysign.s 'fd, 'fj, 'fk");
+ break;
+ case FCOPYSIGN_D:
+ Format(instr, "fcopysign.d 'fd, 'fj, 'fk");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return kInstrSize;
+}
+
+void Decoder::DecodeTypekOp22(Instruction* instr) {
+ switch (instr->Bits(31, 10) << 10) {
+ case CLZ_W:
+ Format(instr, "clz.w 'rd, 'rj");
+ break;
+ case CTZ_W:
+ Format(instr, "ctz.w 'rd, 'rj");
+ break;
+ case CLZ_D:
+ Format(instr, "clz.d 'rd, 'rj");
+ break;
+ case CTZ_D:
+ Format(instr, "ctz.d 'rd, 'rj");
+ break;
+ case REVB_2H:
+ Format(instr, "revb.2h 'rd, 'rj");
+ break;
+ case REVB_4H:
+ Format(instr, "revb.4h 'rd, 'rj");
+ break;
+ case REVB_2W:
+ Format(instr, "revb.2w 'rd, 'rj");
+ break;
+ case REVB_D:
+ Format(instr, "revb.d 'rd, 'rj");
+ break;
+ case REVH_2W:
+ Format(instr, "revh.2w 'rd, 'rj");
+ break;
+ case REVH_D:
+ Format(instr, "revh.d 'rd, 'rj");
+ break;
+ case BITREV_4B:
+ Format(instr, "bitrev.4b 'rd, 'rj");
+ break;
+ case BITREV_8B:
+ Format(instr, "bitrev.8b 'rd, 'rj");
+ break;
+ case BITREV_W:
+ Format(instr, "bitrev.w 'rd, 'rj");
+ break;
+ case BITREV_D:
+ Format(instr, "bitrev.d 'rd, 'rj");
+ break;
+ case EXT_W_B:
+ Format(instr, "ext.w.b 'rd, 'rj");
+ break;
+ case EXT_W_H:
+ Format(instr, "ext.w.h 'rd, 'rj");
+ break;
+ case FABS_S:
+ Format(instr, "fabs.s 'fd, 'fj");
+ break;
+ case FABS_D:
+ Format(instr, "fabs.d 'fd, 'fj");
+ break;
+ case FNEG_S:
+ Format(instr, "fneg.s 'fd, 'fj");
+ break;
+ case FNEG_D:
+ Format(instr, "fneg.d 'fd, 'fj");
+ break;
+ case FSQRT_S:
+ Format(instr, "fsqrt.s 'fd, 'fj");
+ break;
+ case FSQRT_D:
+ Format(instr, "fsqrt.d 'fd, 'fj");
+ break;
+ case FMOV_S:
+ Format(instr, "fmov.s 'fd, 'fj");
+ break;
+ case FMOV_D:
+ Format(instr, "fmov.d 'fd, 'fj");
+ break;
+ case MOVGR2FR_W:
+ Format(instr, "movgr2fr.w 'fd, 'rj");
+ break;
+ case MOVGR2FR_D:
+ Format(instr, "movgr2fr.d 'fd, 'rj");
+ break;
+ case MOVGR2FRH_W:
+ Format(instr, "movgr2frh.w 'fd, 'rj");
+ break;
+ case MOVFR2GR_S:
+ Format(instr, "movfr2gr.s 'rd, 'fj");
+ break;
+ case MOVFR2GR_D:
+ Format(instr, "movfr2gr.d 'rd, 'fj");
+ break;
+ case MOVFRH2GR_S:
+ Format(instr, "movfrh2gr.s 'rd, 'fj");
+ break;
+ case MOVGR2FCSR:
+ Format(instr, "movgr2fcsr fcsr, 'rj");
+ break;
+ case MOVFCSR2GR:
+ Format(instr, "movfcsr2gr 'rd, fcsr");
+ break;
+ case FCVT_S_D:
+ Format(instr, "fcvt.s.d 'fd, 'fj");
+ break;
+ case FCVT_D_S:
+ Format(instr, "fcvt.d.s 'fd, 'fj");
+ break;
+ case FTINTRM_W_S:
+ Format(instr, "ftintrm.w.s 'fd, 'fj");
+ break;
+ case FTINTRM_W_D:
+ Format(instr, "ftintrm.w.d 'fd, 'fj");
+ break;
+ case FTINTRM_L_S:
+ Format(instr, "ftintrm.l.s 'fd, 'fj");
+ break;
+ case FTINTRM_L_D:
+ Format(instr, "ftintrm.l.d 'fd, 'fj");
+ break;
+ case FTINTRP_W_S:
+ Format(instr, "ftintrp.w.s 'fd, 'fj");
+ break;
+ case FTINTRP_W_D:
+ Format(instr, "ftintrp.w.d 'fd, 'fj");
+ break;
+ case FTINTRP_L_S:
+ Format(instr, "ftintrp.l.s 'fd, 'fj");
+ break;
+ case FTINTRP_L_D:
+ Format(instr, "ftintrp.l.d 'fd, 'fj");
+ break;
+ case FTINTRZ_W_S:
+ Format(instr, "ftintrz.w.s 'fd, 'fj");
+ break;
+ case FTINTRZ_W_D:
+ Format(instr, "ftintrz.w.d 'fd, 'fj");
+ break;
+ case FTINTRZ_L_S:
+ Format(instr, "ftintrz.l.s 'fd, 'fj");
+ break;
+ case FTINTRZ_L_D:
+ Format(instr, "ftintrz.l.d 'fd, 'fj");
+ break;
+ case FTINTRNE_W_S:
+ Format(instr, "ftintrne.w.s 'fd, 'fj");
+ break;
+ case FTINTRNE_W_D:
+ Format(instr, "ftintrne.w.d 'fd, 'fj");
+ break;
+ case FTINTRNE_L_S:
+ Format(instr, "ftintrne.l.s 'fd, 'fj");
+ break;
+ case FTINTRNE_L_D:
+ Format(instr, "ftintrne.l.d 'fd, 'fj");
+ break;
+ case FTINT_W_S:
+ Format(instr, "ftint.w.s 'fd, 'fj");
+ break;
+ case FTINT_W_D:
+ Format(instr, "ftint.w.d 'fd, 'fj");
+ break;
+ case FTINT_L_S:
+ Format(instr, "ftint.l.s 'fd, 'fj");
+ break;
+ case FTINT_L_D:
+ Format(instr, "ftint.l.d 'fd, 'fj");
+ break;
+ case FFINT_S_W:
+ Format(instr, "ffint.s.w 'fd, 'fj");
+ break;
+ case FFINT_S_L:
+ Format(instr, "ffint.s.l 'fd, 'fj");
+ break;
+ case FFINT_D_W:
+ Format(instr, "ffint.d.w 'fd, 'fj");
+ break;
+ case FFINT_D_L:
+ Format(instr, "ffint.d.l 'fd, 'fj");
+ break;
+ case FRINT_S:
+ Format(instr, "frint.s 'fd, 'fj");
+ break;
+ case FRINT_D:
+ Format(instr, "frint.d 'fd, 'fj");
+ break;
+ case MOVFR2CF:
+ Format(instr, "movfr2cf fcc'cd, 'fj");
+ break;
+ case MOVCF2FR:
+ Format(instr, "movcf2fr 'fd, fcc'cj");
+ break;
+ case MOVGR2CF:
+ Format(instr, "movgr2cf fcc'cd, 'rj");
+ break;
+ case MOVCF2GR:
+ Format(instr, "movcf2gr 'rd, fcc'cj");
+ break;
+ case FRECIP_S:
+ Format(instr, "frecip.s 'fd, 'fj");
+ break;
+ case FRECIP_D:
+ Format(instr, "frecip.d 'fd, 'fj");
+ break;
+ case FRSQRT_S:
+ Format(instr, "frsqrt.s 'fd, 'fj");
+ break;
+ case FRSQRT_D:
+ Format(instr, "frsqrt.d 'fd, 'fj");
+ break;
+ case FCLASS_S:
+ Format(instr, "fclass.s 'fd, 'fj");
+ break;
+ case FCLASS_D:
+ Format(instr, "fclass.d 'fd, 'fj");
+ break;
+ case FLOGB_S:
+ Format(instr, "flogb.s 'fd, 'fj");
+ break;
+ case FLOGB_D:
+ Format(instr, "flogb.d 'fd, 'fj");
+ break;
+ case CLO_W:
+ Format(instr, "clo.w 'rd, 'rj");
+ break;
+ case CTO_W:
+ Format(instr, "cto.w 'rd, 'rj");
+ break;
+ case CLO_D:
+ Format(instr, "clo.d 'rd, 'rj");
+ break;
+ case CTO_D:
+ Format(instr, "cto.d 'rd, 'rj");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
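+// Decode a single instruction: print its raw bits, then dispatch on the
+// major instruction type. Every path consumes exactly one 4-byte instruction
+// (kInstrSize) except the Op17 path, which forwards the size returned by
+// DecodeTypekOp17 so that BREAK could grow later.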
+int Decoder::InstructionDecode(byte* instr_ptr) {
+ Instruction* instr = Instruction::At(instr_ptr);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ", instr->InstructionBits());
+ switch (instr->InstructionType()) {
+ case Instruction::kOp6Type: {
+ DecodeTypekOp6(instr);
+ break;
+ }
+ case Instruction::kOp7Type: {
+ DecodeTypekOp7(instr);
+ break;
+ }
+ case Instruction::kOp8Type: {
+ DecodeTypekOp8(instr);
+ break;
+ }
+ case Instruction::kOp10Type: {
+ DecodeTypekOp10(instr);
+ break;
+ }
+ case Instruction::kOp12Type: {
+ DecodeTypekOp12(instr);
+ break;
+ }
+ case Instruction::kOp14Type: {
+ DecodeTypekOp14(instr);
+ break;
+ }
+ case Instruction::kOp17Type: {
+ return DecodeTypekOp17(instr);
+ }
+ case Instruction::kOp22Type: {
+ DecodeTypekOp22(instr);
+ break;
+ }
+ case Instruction::kUnsupported: {
+ Format(instr, "UNSUPPORTED");
+ break;
+ }
+ default: {
+ Format(instr, "UNSUPPORTED");
+ break;
+ }
+ }
+ return kInstrSize;
+}
+
+} // namespace internal
+} // namespace v8
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::base::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
+ return tmp_buffer_.begin();
+}
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
+}
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ return v8::internal::Registers::Name(reg);
+}
+
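+// LOONG64 has no XMM registers; the generic NameConverter hook is reused to
+// name the FPU registers.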
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ return v8::internal::FPURegisters::Name(reg);
+}
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ UNREACHABLE();
+}
+
+const char* NameConverter::NameInCode(byte* addr) const {
+ // The default name converter is called for unknown code. So we will not try
+ // to access any memory.
+ return "";
+}
+
+//------------------------------------------------------------------------------
+
+int Disassembler::InstructionDecode(v8::base::Vector<char> buffer,
+ byte* instruction) {
+ v8::internal::Decoder d(converter_, buffer);
+ return d.InstructionDecode(instruction);
+}
+
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
+
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
+ UnimplementedOpcodeAction unimplemented_action) {
+ NameConverter converter;
+ Disassembler d(converter, unimplemented_action);
+ for (byte* pc = begin; pc < end;) {
+ v8::base::EmbeddedVector<char, 128> buffer;
+ buffer[0] = '\0';
+ byte* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.begin());
+ }
+}
+
+#undef STRING_STARTS_WITH
+
+} // namespace disasm
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/diagnostics/loong64/unwinder-loong64.cc b/deps/v8/src/diagnostics/loong64/unwinder-loong64.cc
new file mode 100644
index 0000000000..84d2e41cfc
--- /dev/null
+++ b/deps/v8/src/diagnostics/loong64/unwinder-loong64.cc
@@ -0,0 +1,14 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/unwinder.h"
+
+namespace v8 {
+
+struct RegisterState;
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/mips/disasm-mips.cc b/deps/v8/src/diagnostics/mips/disasm-mips.cc
index c5aeb27457..32a0bdb048 100644
--- a/deps/v8/src/diagnostics/mips/disasm-mips.cc
+++ b/deps/v8/src/diagnostics/mips/disasm-mips.cc
@@ -555,7 +555,6 @@ void Decoder::PrintMsaDataFormat(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
char DF[] = {'b', 'h', 'w', 'd'};
@@ -600,7 +599,6 @@ void Decoder::PrintMsaDataFormat(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
@@ -904,7 +902,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintSa(instr);
return 2;
}
- break;
case 'd': {
DCHECK(STRING_STARTS_WITH(format, "sd"));
PrintSd(instr);
@@ -1521,7 +1518,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
}
}
@@ -1538,7 +1534,6 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
switch (instr->RsFieldRaw()) {
case BC1: // bc1 handled in DecodeTypeImmediate.
UNREACHABLE();
- break;
case MFC1:
Format(instr, "mfc1 'rt, 'fs");
break;
@@ -1966,7 +1961,6 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
}
@@ -1997,7 +1991,6 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
break;
default:
@@ -2703,7 +2696,6 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // MIPS does not have the concept of a byte register.
- return "nobytereg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/deps/v8/src/diagnostics/mips64/disasm-mips64.cc b/deps/v8/src/diagnostics/mips64/disasm-mips64.cc
index d8ff14730d..0712431fc3 100644
--- a/deps/v8/src/diagnostics/mips64/disasm-mips64.cc
+++ b/deps/v8/src/diagnostics/mips64/disasm-mips64.cc
@@ -596,7 +596,6 @@ void Decoder::PrintMsaDataFormat(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
char DF[] = {'b', 'h', 'w', 'd'};
@@ -641,7 +640,6 @@ void Decoder::PrintMsaDataFormat(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
@@ -945,7 +943,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintSa(instr);
return 2;
}
- break;
case 'd': {
DCHECK(STRING_STARTS_WITH(format, "sd"));
PrintSd(instr);
@@ -1744,7 +1741,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
break;
}
@@ -1761,7 +1757,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
break;
}
@@ -1782,7 +1777,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
break;
}
@@ -2250,7 +2244,6 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
break;
}
@@ -2285,7 +2278,6 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
break;
default:
@@ -2993,7 +2985,6 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // MIPS does not have the concept of a byte register.
- return "nobytereg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index e45d7580c8..a74548e949 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -343,8 +343,6 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
}
}
-USE_TORQUE_VERIFIER(JSReceiver)
-
bool JSObject::ElementsAreSafeToExamine(PtrComprCageBase cage_base) const {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
@@ -785,8 +783,6 @@ void JSDate::JSDateVerify(Isolate* isolate) {
}
}
-USE_TORQUE_VERIFIER(JSMessageObject)
-
void String::StringVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::StringVerify(*this, isolate);
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
@@ -1005,8 +1001,11 @@ void Code::CodeVerify(Isolate* isolate) {
CHECK_LE(constant_pool_offset(), code_comments_offset());
CHECK_LE(code_comments_offset(), unwinding_info_offset());
CHECK_LE(unwinding_info_offset(), MetadataSize());
+#if !defined(_MSC_VER) || defined(__clang__)
+ // See also: PlatformEmbeddedFileWriterWin::AlignToCodeAlignment.
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
IsAligned(InstructionStart(), kCodeAlignment));
+#endif // !defined(_MSC_VER) || defined(__clang__)
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
IsAligned(raw_instruction_start(), kCodeAlignment));
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
@@ -1139,19 +1138,13 @@ void JSWeakRef::JSWeakRefVerify(Isolate* isolate) {
}
void JSFinalizationRegistry::JSFinalizationRegistryVerify(Isolate* isolate) {
- CHECK(IsJSFinalizationRegistry());
- JSObjectVerify(isolate);
- VerifyHeapPointer(isolate, cleanup());
- CHECK(active_cells().IsUndefined(isolate) || active_cells().IsWeakCell());
+ TorqueGeneratedClassVerifiers::JSFinalizationRegistryVerify(*this, isolate);
if (active_cells().IsWeakCell()) {
CHECK(WeakCell::cast(active_cells()).prev().IsUndefined(isolate));
}
- CHECK(cleared_cells().IsUndefined(isolate) || cleared_cells().IsWeakCell());
if (cleared_cells().IsWeakCell()) {
CHECK(WeakCell::cast(cleared_cells()).prev().IsUndefined(isolate));
}
- CHECK(next_dirty().IsUndefined(isolate) ||
- next_dirty().IsJSFinalizationRegistry());
}
void JSWeakMap::JSWeakMapVerify(Isolate* isolate) {
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index 46fccedde7..f8e967dbf1 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -1513,7 +1513,7 @@ void JSFunction::JSFunctionPrint(std::ostream& os) {
}
os << "\n - formal_parameter_count: "
- << shared().internal_formal_parameter_count();
+ << shared().internal_formal_parameter_count_without_receiver();
os << "\n - kind: " << shared().kind();
os << "\n - context: " << Brief(context());
os << "\n - code: " << Brief(raw_code());
@@ -1583,7 +1583,8 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) {
os << "\n - kind: " << kind();
os << "\n - syntax kind: " << syntax_kind();
os << "\n - function_map_index: " << function_map_index();
- os << "\n - formal_parameter_count: " << internal_formal_parameter_count();
+ os << "\n - formal_parameter_count: "
+ << internal_formal_parameter_count_without_receiver();
os << "\n - expected_nof_properties: " << expected_nof_properties();
os << "\n - language_mode: " << language_mode();
os << "\n - data: " << Brief(function_data(kAcquireLoad));
@@ -1658,7 +1659,7 @@ void Code::CodePrint(std::ostream& os) {
void CodeDataContainer::CodeDataContainerPrint(std::ostream& os) {
PrintHeader(os, "CodeDataContainer");
- os << "\n - kind_specific_flags: " << kind_specific_flags();
+ os << "\n - kind_specific_flags: " << kind_specific_flags(kRelaxedLoad);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
os << "\n - code: " << Brief(code());
os << "\n - code_entry_point: "
@@ -1985,7 +1986,6 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) {
}
os << "\n - memory_start: " << static_cast<void*>(memory_start());
os << "\n - memory_size: " << memory_size();
- os << "\n - memory_mask: " << AsHex(memory_mask());
os << "\n - imported_function_targets: "
<< static_cast<void*>(imported_function_targets());
os << "\n - globals_start: " << static_cast<void*>(globals_start());
diff --git a/deps/v8/src/diagnostics/perf-jit.h b/deps/v8/src/diagnostics/perf-jit.h
index 746f9f7c85..47a6002b08 100644
--- a/deps/v8/src/diagnostics/perf-jit.h
+++ b/deps/v8/src/diagnostics/perf-jit.h
@@ -87,6 +87,7 @@ class PerfJitLogger : public CodeEventLogger {
static const uint32_t kElfMachARM = 40;
static const uint32_t kElfMachMIPS = 8;
static const uint32_t kElfMachMIPS64 = 8;
+ static const uint32_t kElfMachLOONG64 = 258;
static const uint32_t kElfMachARM64 = 183;
static const uint32_t kElfMachS390x = 22;
static const uint32_t kElfMachPPC64 = 21;
@@ -103,6 +104,8 @@ class PerfJitLogger : public CodeEventLogger {
return kElfMachMIPS;
#elif V8_TARGET_ARCH_MIPS64
return kElfMachMIPS64;
+#elif V8_TARGET_ARCH_LOONG64
+ return kElfMachLOONG64;
#elif V8_TARGET_ARCH_ARM64
return kElfMachARM64;
#elif V8_TARGET_ARCH_S390X
diff --git a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
index affbc0fc8e..7d366a6ba1 100644
--- a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
+++ b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
@@ -917,6 +917,18 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "cnttzd'. 'ra, 'rs");
return;
}
+ case BRH: {
+ Format(instr, "brh 'ra, 'rs");
+ return;
+ }
+ case BRW: {
+ Format(instr, "brw 'ra, 'rs");
+ return;
+ }
+ case BRD: {
+ Format(instr, "brd 'ra, 'rs");
+ return;
+ }
case ANDX: {
Format(instr, "and'. 'ra, 'rs, 'rb");
return;
@@ -1393,13 +1405,20 @@ void Decoder::DecodeExt6(Instruction* instr) {
#undef DECODE_XX2_B_INSTRUCTIONS
}
switch (EXT6 | (instr->BitField(10, 2))) {
-#define DECODE_XX2_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: { \
- Format(instr, #name " 'Xt, 'Xb"); \
- return; \
+#define DECODE_XX2_VECTOR_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'Xt, 'Xb"); \
+ return; \
+ }
+ PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(DECODE_XX2_VECTOR_A_INSTRUCTIONS)
+#undef DECODE_XX2_VECTOR_A_INSTRUCTIONS
+#define DECODE_XX2_SCALAR_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'Dt, 'Db"); \
+ return; \
}
- PPC_XX2_OPCODE_A_FORM_LIST(DECODE_XX2_A_INSTRUCTIONS)
-#undef DECODE_XX2_A_INSTRUCTIONS
+ PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(DECODE_XX2_SCALAR_A_INSTRUCTIONS)
+#undef DECODE_XX2_SCALAR_A_INSTRUCTIONS
}
Unknown(instr); // not used by V8
}
diff --git a/deps/v8/src/diagnostics/ppc/eh-frame-ppc.cc b/deps/v8/src/diagnostics/ppc/eh-frame-ppc.cc
index 148d01116d..8f7198cd05 100644
--- a/deps/v8/src/diagnostics/ppc/eh-frame-ppc.cc
+++ b/deps/v8/src/diagnostics/ppc/eh-frame-ppc.cc
@@ -32,7 +32,6 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
return kR0DwarfCode;
default:
UNIMPLEMENTED();
- return -1;
}
}
@@ -47,7 +46,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "sp";
default:
UNIMPLEMENTED();
- return nullptr;
}
}
diff --git a/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc b/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
index 2955612166..c3977cbf3e 100644
--- a/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
+++ b/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
@@ -68,11 +68,15 @@ class Decoder {
// Printing of common values.
void PrintRegister(int reg);
void PrintFPURegister(int freg);
+ void PrintVRegister(int reg);
void PrintFPUStatusRegister(int freg);
void PrintRs1(Instruction* instr);
void PrintRs2(Instruction* instr);
void PrintRd(Instruction* instr);
+ void PrintUimm(Instruction* instr);
void PrintVs1(Instruction* instr);
+ void PrintVs2(Instruction* instr);
+ void PrintVd(Instruction* instr);
void PrintFRs1(Instruction* instr);
void PrintFRs2(Instruction* instr);
void PrintFRs3(Instruction* instr);
@@ -96,10 +100,15 @@ class Decoder {
void PrintRvcImm8Addi4spn(Instruction* instr);
void PrintRvcImm11CJ(Instruction* instr);
void PrintRvcImm8B(Instruction* instr);
+ void PrintRvvVm(Instruction* instr);
void PrintAcquireRelease(Instruction* instr);
void PrintBranchOffset(Instruction* instr);
void PrintStoreOffset(Instruction* instr);
void PrintCSRReg(Instruction* instr);
+ void PrintRvvSEW(Instruction* instr);
+ void PrintRvvLMUL(Instruction* instr);
+ void PrintRvvSimm5(Instruction* instr);
+ void PrintRvvUimm5(Instruction* instr);
void PrintRoundingMode(Instruction* instr);
void PrintMemoryOrder(Instruction* instr, bool is_pred);
@@ -123,6 +132,14 @@ class Decoder {
void DecodeCJType(Instruction* instr);
void DecodeCBType(Instruction* instr);
+ void DecodeVType(Instruction* instr);
+ void DecodeRvvIVV(Instruction* instr);
+ void DecodeRvvIVI(Instruction* instr);
+ void DecodeRvvIVX(Instruction* instr);
+ void DecodeRvvVL(Instruction* instr);
+ void DecodeRvvVS(Instruction* instr);
+ void DecodeRvvMVV(Instruction* instr);
+ void DecodeRvvMVX(Instruction* instr);
// Printing of instruction name.
void PrintInstructionName(Instruction* instr);
@@ -137,6 +154,8 @@ class Decoder {
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
+ int switch_sew(Instruction* instr);
+ int switch_nf(Instruction* instr);
const disasm::NameConverter& converter_;
v8::base::Vector<char> out_buffer_;
int out_buffer_pos_;
@@ -164,6 +183,10 @@ void Decoder::PrintRegister(int reg) {
Print(converter_.NameOfCPURegister(reg));
}
+void Decoder::PrintVRegister(int reg) {
+ Print(v8::internal::VRegisters::Name(reg));
+}
+
void Decoder::PrintRs1(Instruction* instr) {
int reg = instr->Rs1Value();
PrintRegister(reg);
@@ -179,11 +202,26 @@ void Decoder::PrintRd(Instruction* instr) {
PrintRegister(reg);
}
-void Decoder::PrintVs1(Instruction* instr) {
+void Decoder::PrintUimm(Instruction* instr) {
int val = instr->Rs1Value();
out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", val);
}
+void Decoder::PrintVs1(Instruction* instr) {
+ int reg = instr->Vs1Value();
+ PrintVRegister(reg);
+}
+
+void Decoder::PrintVs2(Instruction* instr) {
+ int reg = instr->Vs2Value();
+ PrintVRegister(reg);
+}
+
+void Decoder::PrintVd(Instruction* instr) {
+ int reg = instr->VdValue();
+ PrintVRegister(reg);
+}
+
// Print the FPUregister name according to the active name converter.
void Decoder::PrintFPURegister(int freg) {
Print(converter_.NameOfXMMRegister(freg));
@@ -247,6 +285,26 @@ void Decoder::PrintStoreOffset(Instruction* instr) {
out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
+void Decoder::PrintRvvSEW(Instruction* instr) {
+ const char* sew = instr->RvvSEW();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", sew);
+}
+
+void Decoder::PrintRvvLMUL(Instruction* instr) {
+ const char* lmul = instr->RvvLMUL();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", lmul);
+}
+
+void Decoder::PrintRvvSimm5(Instruction* instr) {
+ const int simm5 = instr->RvvSimm5();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", simm5);
+}
+
+void Decoder::PrintRvvUimm5(Instruction* instr) {
+ const uint32_t uimm5 = instr->RvvUimm5();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", uimm5);
+}
+
void Decoder::PrintImm20U(Instruction* instr) {
int32_t imm = instr->Imm20UValue();
out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
@@ -335,6 +393,13 @@ void Decoder::PrintRvcImm8B(Instruction* instr) {
out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
+void Decoder::PrintRvvVm(Instruction* instr) {
+ uint8_t imm = instr->RvvVM();
+ if (imm == 0) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, " vm");
+ }
+}
+
void Decoder::PrintAcquireRelease(Instruction* instr) {
bool aq = instr->AqValue();
bool rl = instr->RlValue();
@@ -724,13 +789,50 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
DCHECK(STRING_STARTS_WITH(format, "suc"));
PrintMemoryOrder(instr, false);
return 3;
+ } else if (format[1] == 'e') {
+ DCHECK(STRING_STARTS_WITH(format, "sew"));
+ PrintRvvSEW(instr);
+ return 3;
+ } else if (format[1] == 'i') {
+ DCHECK(STRING_STARTS_WITH(format, "simm5"));
+ PrintRvvSimm5(instr);
+ return 5;
}
UNREACHABLE();
}
case 'v': { // 'vs1: Raw values from register fields
- DCHECK(STRING_STARTS_WITH(format, "vs1"));
- PrintVs1(instr);
- return 3;
+ if (format[1] == 'd') {
+ DCHECK(STRING_STARTS_WITH(format, "vd"));
+ PrintVd(instr);
+ return 2;
+ } else if (format[2] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "vs1"));
+ PrintVs1(instr);
+ return 3;
+ } else if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "vs2"));
+ PrintVs2(instr);
+ return 3;
+ } else {
+ DCHECK(STRING_STARTS_WITH(format, "vm"));
+ PrintRvvVm(instr);
+ return 2;
+ }
+ }
+ case 'l': {
+ DCHECK(STRING_STARTS_WITH(format, "lmul"));
+ PrintRvvLMUL(instr);
+ return 4;
+ }
+ case 'u': {
+ if (STRING_STARTS_WITH(format, "uimm5")) {
+ PrintRvvUimm5(instr);
+ return 5;
+ } else {
+ DCHECK(STRING_STARTS_WITH(format, "uimm"));
+ PrintUimm(instr);
+ return 4;
+ }
}
case 't': { // 'target: target of branch instructions'
DCHECK(STRING_STARTS_WITH(format, "target"));
@@ -1308,256 +1410,265 @@ void Decoder::DecodeR4Type(Instruction* instr) {
}
void Decoder::DecodeIType(Instruction* instr) {
- switch (instr->InstructionBits() & kITypeMask) {
- case RO_JALR:
- if (instr->RdValue() == zero_reg.code() &&
- instr->Rs1Value() == ra.code() && instr->Imm12Value() == 0)
- Format(instr, "ret");
- else if (instr->RdValue() == zero_reg.code() && instr->Imm12Value() == 0)
- Format(instr, "jr 'rs1");
- else if (instr->RdValue() == ra.code() && instr->Imm12Value() == 0)
- Format(instr, "jalr 'rs1");
- else
- Format(instr, "jalr 'rd, 'imm12('rs1)'target");
- break;
- case RO_LB:
- Format(instr, "lb 'rd, 'imm12('rs1)");
- break;
- case RO_LH:
- Format(instr, "lh 'rd, 'imm12('rs1)");
- break;
- case RO_LW:
- Format(instr, "lw 'rd, 'imm12('rs1)");
- break;
- case RO_LBU:
- Format(instr, "lbu 'rd, 'imm12('rs1)");
- break;
- case RO_LHU:
- Format(instr, "lhu 'rd, 'imm12('rs1)");
- break;
+ if (instr->vl_vs_width() != -1) {
+ DecodeRvvVL(instr);
+ } else {
+ switch (instr->InstructionBits() & kITypeMask) {
+ case RO_JALR:
+ if (instr->RdValue() == zero_reg.code() &&
+ instr->Rs1Value() == ra.code() && instr->Imm12Value() == 0)
+ Format(instr, "ret");
+ else if (instr->RdValue() == zero_reg.code() &&
+ instr->Imm12Value() == 0)
+ Format(instr, "jr 'rs1");
+ else if (instr->RdValue() == ra.code() && instr->Imm12Value() == 0)
+ Format(instr, "jalr 'rs1");
+ else
+ Format(instr, "jalr 'rd, 'imm12('rs1)");
+ break;
+ case RO_LB:
+ Format(instr, "lb 'rd, 'imm12('rs1)");
+ break;
+ case RO_LH:
+ Format(instr, "lh 'rd, 'imm12('rs1)");
+ break;
+ case RO_LW:
+ Format(instr, "lw 'rd, 'imm12('rs1)");
+ break;
+ case RO_LBU:
+ Format(instr, "lbu 'rd, 'imm12('rs1)");
+ break;
+ case RO_LHU:
+ Format(instr, "lhu 'rd, 'imm12('rs1)");
+ break;
#ifdef V8_TARGET_ARCH_64_BIT
- case RO_LWU:
- Format(instr, "lwu 'rd, 'imm12('rs1)");
- break;
- case RO_LD:
- Format(instr, "ld 'rd, 'imm12('rs1)");
- break;
+ case RO_LWU:
+ Format(instr, "lwu 'rd, 'imm12('rs1)");
+ break;
+ case RO_LD:
+ Format(instr, "ld 'rd, 'imm12('rs1)");
+ break;
#endif /*V8_TARGET_ARCH_64_BIT*/
- case RO_ADDI:
- if (instr->Imm12Value() == 0) {
- if (instr->RdValue() == zero_reg.code() &&
- instr->Rs1Value() == zero_reg.code())
- Format(instr, "nop");
+ case RO_ADDI:
+ if (instr->Imm12Value() == 0) {
+ if (instr->RdValue() == zero_reg.code() &&
+ instr->Rs1Value() == zero_reg.code())
+ Format(instr, "nop");
+ else
+ Format(instr, "mv 'rd, 'rs1");
+ } else if (instr->Rs1Value() == zero_reg.code()) {
+ Format(instr, "li 'rd, 'imm12");
+ } else {
+ Format(instr, "addi 'rd, 'rs1, 'imm12");
+ }
+ break;
+ case RO_SLTI:
+ Format(instr, "slti 'rd, 'rs1, 'imm12");
+ break;
+ case RO_SLTIU:
+ if (instr->Imm12Value() == 1)
+ Format(instr, "seqz 'rd, 'rs1");
else
- Format(instr, "mv 'rd, 'rs1");
- } else if (instr->Rs1Value() == zero_reg.code()) {
- Format(instr, "li 'rd, 'imm12");
- } else {
- Format(instr, "addi 'rd, 'rs1, 'imm12");
- }
- break;
- case RO_SLTI:
- Format(instr, "slti 'rd, 'rs1, 'imm12");
- break;
- case RO_SLTIU:
- if (instr->Imm12Value() == 1)
- Format(instr, "seqz 'rd, 'rs1");
- else
- Format(instr, "sltiu 'rd, 'rs1, 'imm12");
- break;
- case RO_XORI:
- if (instr->Imm12Value() == -1)
- Format(instr, "not 'rd, 'rs1");
- else
- Format(instr, "xori 'rd, 'rs1, 'imm12x");
- break;
- case RO_ORI:
- Format(instr, "ori 'rd, 'rs1, 'imm12x");
- break;
- case RO_ANDI:
- Format(instr, "andi 'rd, 'rs1, 'imm12x");
- break;
- case RO_SLLI:
- Format(instr, "slli 'rd, 'rs1, 's64");
- break;
- case RO_SRLI: { // RO_SRAI
- if (!instr->IsArithShift()) {
- Format(instr, "srli 'rd, 'rs1, 's64");
- } else {
- Format(instr, "srai 'rd, 'rs1, 's64");
+ Format(instr, "sltiu 'rd, 'rs1, 'imm12");
+ break;
+ case RO_XORI:
+ if (instr->Imm12Value() == -1)
+ Format(instr, "not 'rd, 'rs1");
+ else
+ Format(instr, "xori 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_ORI:
+ Format(instr, "ori 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_ANDI:
+ Format(instr, "andi 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_SLLI:
+ Format(instr, "slli 'rd, 'rs1, 's64");
+ break;
+ case RO_SRLI: { // RO_SRAI
+ if (!instr->IsArithShift()) {
+ Format(instr, "srli 'rd, 'rs1, 's64");
+ } else {
+ Format(instr, "srai 'rd, 'rs1, 's64");
+ }
+ break;
}
- break;
- }
#ifdef V8_TARGET_ARCH_64_BIT
- case RO_ADDIW:
- if (instr->Imm12Value() == 0)
- Format(instr, "sext.w 'rd, 'rs1");
- else
- Format(instr, "addiw 'rd, 'rs1, 'imm12");
- break;
- case RO_SLLIW:
- Format(instr, "slliw 'rd, 'rs1, 's32");
- break;
- case RO_SRLIW: { // RO_SRAIW
- if (!instr->IsArithShift()) {
- Format(instr, "srliw 'rd, 'rs1, 's32");
- } else {
- Format(instr, "sraiw 'rd, 'rs1, 's32");
+ case RO_ADDIW:
+ if (instr->Imm12Value() == 0)
+ Format(instr, "sext.w 'rd, 'rs1");
+ else
+ Format(instr, "addiw 'rd, 'rs1, 'imm12");
+ break;
+ case RO_SLLIW:
+ Format(instr, "slliw 'rd, 'rs1, 's32");
+ break;
+ case RO_SRLIW: { // RO_SRAIW
+ if (!instr->IsArithShift()) {
+ Format(instr, "srliw 'rd, 'rs1, 's32");
+ } else {
+ Format(instr, "sraiw 'rd, 'rs1, 's32");
+ }
+ break;
}
- break;
- }
#endif /*V8_TARGET_ARCH_64_BIT*/
- case RO_FENCE:
- if (instr->MemoryOrder(true) == PSIORW &&
- instr->MemoryOrder(false) == PSIORW)
- Format(instr, "fence");
- else
- Format(instr, "fence 'pre, 'suc");
- break;
- case RO_ECALL: { // RO_EBREAK
- if (instr->Imm12Value() == 0) { // ECALL
- Format(instr, "ecall");
- } else if (instr->Imm12Value() == 1) { // EBREAK
- Format(instr, "ebreak");
- } else {
- UNSUPPORTED_RISCV();
+ case RO_FENCE:
+ if (instr->MemoryOrder(true) == PSIORW &&
+ instr->MemoryOrder(false) == PSIORW)
+ Format(instr, "fence");
+ else
+ Format(instr, "fence 'pre, 'suc");
+ break;
+ case RO_ECALL: { // RO_EBREAK
+ if (instr->Imm12Value() == 0) { // ECALL
+ Format(instr, "ecall");
+ } else if (instr->Imm12Value() == 1) { // EBREAK
+ Format(instr, "ebreak");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
}
- break;
- }
- // TODO(riscv): use Zifencei Standard Extension macro block
- case RO_FENCE_I:
- Format(instr, "fence.i");
- break;
- // TODO(riscv): use Zicsr Standard Extension macro block
- case RO_CSRRW:
- if (instr->CsrValue() == csr_fcsr) {
+ // TODO(riscv): use Zifencei Standard Extension macro block
+ case RO_FENCE_I:
+ Format(instr, "fence.i");
+ break;
+ // TODO(riscv): use Zicsr Standard Extension macro block
+ case RO_CSRRW:
+ if (instr->CsrValue() == csr_fcsr) {
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "fscsr 'rs1");
+ else
+ Format(instr, "fscsr 'rd, 'rs1");
+ } else if (instr->CsrValue() == csr_frm) {
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "fsrm 'rs1");
+ else
+ Format(instr, "fsrm 'rd, 'rs1");
+ } else if (instr->CsrValue() == csr_fflags) {
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "fsflags 'rs1");
+ else
+ Format(instr, "fsflags 'rd, 'rs1");
+ } else if (instr->RdValue() == zero_reg.code()) {
+ Format(instr, "csrw 'csr, 'rs1");
+ } else {
+ Format(instr, "csrrw 'rd, 'csr, 'rs1");
+ }
+ break;
+ case RO_CSRRS:
+ if (instr->Rs1Value() == zero_reg.code()) {
+ switch (instr->CsrValue()) {
+ case csr_instret:
+ Format(instr, "rdinstret 'rd");
+ break;
+ case csr_instreth:
+ Format(instr, "rdinstreth 'rd");
+ break;
+ case csr_time:
+ Format(instr, "rdtime 'rd");
+ break;
+ case csr_timeh:
+ Format(instr, "rdtimeh 'rd");
+ break;
+ case csr_cycle:
+ Format(instr, "rdcycle 'rd");
+ break;
+ case csr_cycleh:
+ Format(instr, "rdcycleh 'rd");
+ break;
+ case csr_fflags:
+ Format(instr, "frflags 'rd");
+ break;
+ case csr_frm:
+ Format(instr, "frrm 'rd");
+ break;
+ case csr_fcsr:
+ Format(instr, "frcsr 'rd");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else if (instr->Rs1Value() == zero_reg.code()) {
+ Format(instr, "csrr 'rd, 'csr");
+ } else if (instr->RdValue() == zero_reg.code()) {
+ Format(instr, "csrs 'csr, 'rs1");
+ } else {
+ Format(instr, "csrrs 'rd, 'csr, 'rs1");
+ }
+ break;
+ case RO_CSRRC:
if (instr->RdValue() == zero_reg.code())
- Format(instr, "fscsr 'rs1");
+ Format(instr, "csrc 'csr, 'rs1");
else
- Format(instr, "fscsr 'rd, 'rs1");
- } else if (instr->CsrValue() == csr_frm) {
+ Format(instr, "csrrc 'rd, 'csr, 'rs1");
+ break;
+ case RO_CSRRWI:
if (instr->RdValue() == zero_reg.code())
- Format(instr, "fsrm 'rs1");
+ Format(instr, "csrwi 'csr, 'uimm");
else
- Format(instr, "fsrm 'rd, 'rs1");
- } else if (instr->CsrValue() == csr_fflags) {
+ Format(instr, "csrrwi 'rd, 'csr, 'uimm");
+ break;
+ case RO_CSRRSI:
if (instr->RdValue() == zero_reg.code())
- Format(instr, "fsflags 'rs1");
+ Format(instr, "csrsi 'csr, 'uimm");
else
- Format(instr, "fsflags 'rd, 'rs1");
- } else if (instr->RdValue() == zero_reg.code()) {
- Format(instr, "csrw 'csr, 'rs1");
- } else {
- Format(instr, "csrrw 'rd, 'csr, 'rs1");
- }
- break;
- case RO_CSRRS:
- if (instr->Rs1Value() == zero_reg.code()) {
- switch (instr->CsrValue()) {
- case csr_instret:
- Format(instr, "rdinstret 'rd");
- break;
- case csr_instreth:
- Format(instr, "rdinstreth 'rd");
- break;
- case csr_time:
- Format(instr, "rdtime 'rd");
- break;
- case csr_timeh:
- Format(instr, "rdtimeh 'rd");
- break;
- case csr_cycle:
- Format(instr, "rdcycle 'rd");
- break;
- case csr_cycleh:
- Format(instr, "rdcycleh 'rd");
- break;
- case csr_fflags:
- Format(instr, "frflags 'rd");
- break;
- case csr_frm:
- Format(instr, "frrm 'rd");
- break;
- case csr_fcsr:
- Format(instr, "frcsr 'rd");
- break;
- default:
- UNREACHABLE();
- }
- } else if (instr->Rs1Value() == zero_reg.code()) {
- Format(instr, "csrr 'rd, 'csr");
- } else if (instr->RdValue() == zero_reg.code()) {
- Format(instr, "csrs 'csr, 'rs1");
- } else {
- Format(instr, "csrrs 'rd, 'csr, 'rs1");
- }
- break;
- case RO_CSRRC:
- if (instr->RdValue() == zero_reg.code())
- Format(instr, "csrc 'csr, 'rs1");
- else
- Format(instr, "csrrc 'rd, 'csr, 'rs1");
- break;
- case RO_CSRRWI:
- if (instr->RdValue() == zero_reg.code())
- Format(instr, "csrwi 'csr, 'vs1");
- else
- Format(instr, "csrrwi 'rd, 'csr, 'vs1");
- break;
- case RO_CSRRSI:
- if (instr->RdValue() == zero_reg.code())
- Format(instr, "csrsi 'csr, 'vs1");
- else
- Format(instr, "csrrsi 'rd, 'csr, 'vs1");
- break;
- case RO_CSRRCI:
- if (instr->RdValue() == zero_reg.code())
- Format(instr, "csrci 'csr, 'vs1");
- else
- Format(instr, "csrrci 'rd, 'csr, 'vs1");
- break;
- // TODO(riscv): use F Extension macro block
- case RO_FLW:
- Format(instr, "flw 'fd, 'imm12('rs1)");
- break;
- // TODO(riscv): use D Extension macro block
- case RO_FLD:
- Format(instr, "fld 'fd, 'imm12('rs1)");
- break;
- default:
- UNSUPPORTED_RISCV();
+ Format(instr, "csrrsi 'rd, 'csr, 'uimm");
+ break;
+ case RO_CSRRCI:
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "csrci 'csr, 'uimm");
+ else
+ Format(instr, "csrrci 'rd, 'csr, 'uimm");
+ break;
+ // TODO(riscv): use F Extension macro block
+ case RO_FLW:
+ Format(instr, "flw 'fd, 'imm12('rs1)");
+ break;
+ // TODO(riscv): use D Extension macro block
+ case RO_FLD:
+ Format(instr, "fld 'fd, 'imm12('rs1)");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
}
}
void Decoder::DecodeSType(Instruction* instr) {
- switch (instr->InstructionBits() & kSTypeMask) {
- case RO_SB:
- Format(instr, "sb 'rs2, 'offS('rs1)");
- break;
- case RO_SH:
- Format(instr, "sh 'rs2, 'offS('rs1)");
- break;
- case RO_SW:
- Format(instr, "sw 'rs2, 'offS('rs1)");
- break;
+ if (instr->vl_vs_width() != -1) {
+ DecodeRvvVS(instr);
+ } else {
+ switch (instr->InstructionBits() & kSTypeMask) {
+ case RO_SB:
+ Format(instr, "sb 'rs2, 'offS('rs1)");
+ break;
+ case RO_SH:
+ Format(instr, "sh 'rs2, 'offS('rs1)");
+ break;
+ case RO_SW:
+ Format(instr, "sw 'rs2, 'offS('rs1)");
+ break;
#ifdef V8_TARGET_ARCH_64_BIT
- case RO_SD:
- Format(instr, "sd 'rs2, 'offS('rs1)");
- break;
+ case RO_SD:
+ Format(instr, "sd 'rs2, 'offS('rs1)");
+ break;
#endif /*V8_TARGET_ARCH_64_BIT*/
- // TODO(riscv): use F Extension macro block
- case RO_FSW:
- Format(instr, "fsw 'fs2, 'offS('rs1)");
- break;
- // TODO(riscv): use D Extension macro block
- case RO_FSD:
- Format(instr, "fsd 'fs2, 'offS('rs1)");
- break;
- default:
- UNSUPPORTED_RISCV();
+ // TODO(riscv): use F Extension macro block
+ case RO_FSW:
+ Format(instr, "fsw 'fs2, 'offS('rs1)");
+ break;
+ // TODO(riscv): use D Extension macro block
+ case RO_FSD:
+ Format(instr, "fsd 'fs2, 'offS('rs1)");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
}
}
-
void Decoder::DecodeBType(Instruction* instr) {
switch (instr->InstructionBits() & kBTypeMask) {
case RO_BEQ:
@@ -1595,6 +1706,7 @@ void Decoder::DecodeUType(Instruction* instr) {
UNSUPPORTED_RISCV();
}
}
+
void Decoder::DecodeJType(Instruction* instr) {
// J Type doesn't have additional mask
switch (instr->BaseOpcodeValue()) {
@@ -1791,6 +1903,511 @@ void Decoder::DecodeCBType(Instruction* instr) {
}
}
+void Decoder::DecodeRvvIVV(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVV);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VV:
+ Format(instr, "vadd.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VSADD_VV:
+ Format(instr, "vsadd.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VSUB_VV:
+ Format(instr, "vsub.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VSSUB_VV:
+ Format(instr, "vssub.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMIN_VV:
+ Format(instr, "vmin.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMINU_VV:
+ Format(instr, "vminu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMAX_VV:
+ Format(instr, "vmax.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMAXU_VV:
+ Format(instr, "vmaxu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VAND_VV:
+ Format(instr, "vand.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VOR_VV:
+ Format(instr, "vor.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VXOR_VV:
+ Format(instr, "vxor.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VRGATHER_VV:
+ Format(instr, "vrgather.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSEQ_VV:
+ Format(instr, "vmseq.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSNE_VV:
+ Format(instr, "vmsne.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSLT_VV:
+ Format(instr, "vmslt.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSLTU_VV:
+ Format(instr, "vmsltu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSLE_VV:
+ Format(instr, "vmsle.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSLEU_VV:
+ Format(instr, "vmsleu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMV_VV:
+ if (instr->RvvVM()) {
+ Format(instr, "vmv.vv 'vd, 'vs1");
+ } else {
+ Format(instr, "vmerge.vvm 'vd, 'vs2, 'vs1, v0");
+ }
+ break;
+ case RO_V_VADC_VV:
+ if (!instr->RvvVM()) {
+ Format(instr, "vadc.vvm 'vd, 'vs2, 'vs1");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VMADC_VV:
+ if (!instr->RvvVM()) {
+ Format(instr, "vmadc.vvm 'vd, 'vs2, 'vs1");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
+void Decoder::DecodeRvvIVI(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVI);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VI:
+ Format(instr, "vadd.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VSADD_VI:
+ Format(instr, "vsadd.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VRSUB_VI:
+ Format(instr, "vrsub.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VAND_VI:
+ Format(instr, "vand.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VOR_VI:
+ Format(instr, "vor.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VXOR_VI:
+ Format(instr, "vxor.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VRGATHER_VI:
+ Format(instr, "vrgather.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMV_VI:
+ if (instr->RvvVM()) {
+ Format(instr, "vmv.vi 'vd, 'simm5");
+ } else {
+ Format(instr, "vmerge.vim 'vd, 'vs2, 'simm5, v0");
+ }
+ break;
+ case RO_V_VMSEQ_VI:
+ Format(instr, "vmseq.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSNE_VI:
+ Format(instr, "vmsne.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSLEU_VI:
+ Format(instr, "vmsleu.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSLE_VI:
+ Format(instr, "vmsle.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSGTU_VI:
+ Format(instr, "vmsgtu.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSGT_VI:
+ Format(instr, "vmsgt.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VSLIDEDOWN_VI:
+ Format(instr, "vslidedown.vi 'vd, 'vs2, 'uimm5'vm");
+ break;
+ case RO_V_VSRL_VI:
+ Format(instr, "vsrl.vi 'vd, 'vs2, 'uimm5'vm");
+ break;
+ case RO_V_VSLL_VI:
+ Format(instr, "vsll.vi 'vd, 'vs2, 'uimm5'vm");
+ break;
+ case RO_V_VADC_VI:
+ if (!instr->RvvVM()) {
+ Format(instr, "vadc.vim 'vd, 'vs2, 'uimm5");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VMADC_VI:
+ if (!instr->RvvVM()) {
+ Format(instr, "vmadc.vim 'vd, 'vs2, 'uimm5");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
+void Decoder::DecodeRvvIVX(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVX);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VX:
+ Format(instr, "vadd.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VSADD_VX:
+ Format(instr, "vsadd.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VSUB_VX:
+ Format(instr, "vsub.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VSSUB_VX:
+ Format(instr, "vssub.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VRSUB_VX:
+ Format(instr, "vrsub.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMIN_VX:
+ Format(instr, "vmin.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMINU_VX:
+ Format(instr, "vminu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMAX_VX:
+ Format(instr, "vmax.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMAXU_VX:
+ Format(instr, "vmaxu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VAND_VX:
+ Format(instr, "vand.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VOR_VX:
+ Format(instr, "vor.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VXOR_VX:
+ Format(instr, "vxor.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VRGATHER_VX:
+ Format(instr, "vrgather.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMV_VX:
+ if (instr->RvvVM()) {
+ Format(instr, "vmv.vx 'vd, 'rs1");
+ } else {
+ Format(instr, "vmerge.vxm 'vd, 'vs2, 'rs1, v0");
+ }
+ break;
+ case RO_V_VMSEQ_VX:
+ Format(instr, "vmseq.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSNE_VX:
+ Format(instr, "vmsne.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSLT_VX:
+ Format(instr, "vmslt.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSLTU_VX:
+ Format(instr, "vmsltu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSLE_VX:
+ Format(instr, "vmsle.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSLEU_VX:
+ Format(instr, "vmsleu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSGT_VX:
+ Format(instr, "vmsgt.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSGTU_VX:
+ Format(instr, "vmsgtu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VSLIDEDOWN_VX:
+ Format(instr, "vslidedown.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VADC_VX:
+ if (!instr->RvvVM()) {
+ Format(instr, "vadc.vxm 'vd, 'vs2, 'rs1");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VMADC_VX:
+ if (!instr->RvvVM()) {
+ Format(instr, "vmadc.vxm 'vd, 'vs2, 'rs1");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
+void Decoder::DecodeRvvMVV(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVV);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VWXUNARY0:
+ if (instr->Vs1Value() == 0x0) {
+ Format(instr, "vmv.x.s 'rd, 'vs2");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ case RO_V_VREDMAXU:
+ Format(instr, "vredmaxu.vs 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VREDMAX:
+ Format(instr, "vredmax.vs 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VREDMIN:
+ Format(instr, "vredmin.vs 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VREDMINU:
+ Format(instr, "vredminu.vs 'vd, 'vs2, 'vs1'vm");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
+void Decoder::DecodeRvvMVX(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVX);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VRXUNARY0:
+ if (instr->Vs2Value() == 0x0) {
+ Format(instr, "vmv.s.x 'vd, 'rs1");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
+void Decoder::DecodeVType(Instruction* instr) {
+ switch (instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask)) {
+ case OP_IVV:
+ DecodeRvvIVV(instr);
+ return;
+ case OP_FVV:
+ UNSUPPORTED_RISCV();
+ return;
+ case OP_MVV:
+ DecodeRvvMVV(instr);
+ return;
+ case OP_IVI:
+ DecodeRvvIVI(instr);
+ return;
+ case OP_IVX:
+ DecodeRvvIVX(instr);
+ return;
+ case OP_FVF:
+ UNSUPPORTED_RISCV();
+ return;
+ case OP_MVX:
+ DecodeRvvMVX(instr);
+ return;
+ }
+ switch (instr->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0x80000000)) {
+ case RO_V_VSETVLI:
+ Format(instr, "vsetvli 'rd, 'rs1, 'sew, 'lmul");
+ break;
+ case RO_V_VSETVL:
+ if (!(instr->InstructionBits() & 0x40000000)) {
+ Format(instr, "vsetvl 'rd, 'rs1, 'rs2");
+ } else {
+ Format(instr, "vsetivli 'rd, 'uimm, 'sew, 'lmul");
+ }
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+int Decoder::switch_nf(Instruction* instr) {
+ int nf = 0;
+ switch (instr->InstructionBits() & kRvvNfMask) {
+ case 0x20000000:
+ nf = 2;
+ break;
+ case 0x40000000:
+ nf = 3;
+ break;
+ case 0x60000000:
+ nf = 4;
+ break;
+ case 0x80000000:
+ nf = 5;
+ break;
+ case 0xa0000000:
+ nf = 6;
+ break;
+ case 0xc0000000:
+ nf = 7;
+ break;
+ case 0xe0000000:
+ nf = 8;
+ break;
+ }
+ return nf;
+}
+void Decoder::DecodeRvvVL(Instruction* instr) {
+ char str[50];
+ uint32_t instr_temp =
+ instr->InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
+ // switch (instr->InstructionBits() &
+ // (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask)) {
+ if (RO_V_VL == instr_temp) {
+ if (!(instr->InstructionBits() & (kRvvRs2Mask))) {
+ snprintf(str, sizeof(str), "vle%d.v 'vd, ('rs1)'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else {
+ snprintf(str, sizeof(str), "vle%dff.v 'vd, ('rs1)'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ }
+ } else if (RO_V_VLS == instr_temp) {
+ snprintf(str, sizeof(str), "vlse%d.v 'vd, ('rs1), 'rs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+
+ } else if (RO_V_VLX == instr_temp) {
+ snprintf(str, sizeof(str), "vlxei%d.v 'vd, ('rs1), 'vs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VLSEG2 == instr_temp || RO_V_VLSEG3 == instr_temp ||
+ RO_V_VLSEG4 == instr_temp || RO_V_VLSEG5 == instr_temp ||
+ RO_V_VLSEG6 == instr_temp || RO_V_VLSEG7 == instr_temp ||
+ RO_V_VLSEG8 == instr_temp) {
+ if (!(instr->InstructionBits() & (kRvvRs2Mask))) {
+ snprintf(str, sizeof(str), "vlseg%de%d.v 'vd, ('rs1)'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ } else {
+ snprintf(str, sizeof(str), "vlseg%de%dff.v 'vd, ('rs1)'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ }
+ Format(instr, str);
+ } else if (RO_V_VLSSEG2 == instr_temp || RO_V_VLSSEG3 == instr_temp ||
+ RO_V_VLSSEG4 == instr_temp || RO_V_VLSSEG5 == instr_temp ||
+ RO_V_VLSSEG6 == instr_temp || RO_V_VLSSEG7 == instr_temp ||
+ RO_V_VLSSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vlsseg%de%d.v 'vd, ('rs1), 'rs2'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VLXSEG2 == instr_temp || RO_V_VLXSEG3 == instr_temp ||
+ RO_V_VLXSEG4 == instr_temp || RO_V_VLXSEG5 == instr_temp ||
+ RO_V_VLXSEG6 == instr_temp || RO_V_VLXSEG7 == instr_temp ||
+ RO_V_VLXSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vlxseg%dei%d.v 'vd, ('rs1), 'vs2'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ }
+}
+
+int Decoder::switch_sew(Instruction* instr) {
+ int width = 0;
+ if ((instr->InstructionBits() & kBaseOpcodeMask) != LOAD_FP &&
+ (instr->InstructionBits() & kBaseOpcodeMask) != STORE_FP)
+ return -1;
+ switch (instr->InstructionBits() & (kRvvWidthMask | kRvvMewMask)) {
+ case 0x0:
+ width = 8;
+ break;
+ case 0x00005000:
+ width = 16;
+ break;
+ case 0x00006000:
+ width = 32;
+ break;
+ case 0x00007000:
+ width = 64;
+ break;
+ case 0x10000000:
+ width = 128;
+ break;
+ case 0x10005000:
+ width = 256;
+ break;
+ case 0x10006000:
+ width = 512;
+ break;
+ case 0x10007000:
+ width = 1024;
+ break;
+ default:
+ width = -1;
+ break;
+ }
+ return width;
+}
+
+void Decoder::DecodeRvvVS(Instruction* instr) {
+ char str[50];
+ uint32_t instr_temp =
+ instr->InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
+ if (RO_V_VS == instr_temp) {
+ snprintf(str, sizeof(str), "vse%d.v 'vd, ('rs1)'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSS == instr_temp) {
+ snprintf(str, sizeof(str), "vsse%d.v 'vd, ('rs1), 'rs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSX == instr_temp) {
+ snprintf(str, sizeof(str), "vsxei%d.v 'vd, ('rs1), 'vs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSU == instr_temp) {
+ snprintf(str, sizeof(str), "vsuxei%d.v 'vd, ('rs1), 'vs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSSEG2 == instr_temp || RO_V_VSSEG3 == instr_temp ||
+ RO_V_VSSEG4 == instr_temp || RO_V_VSSEG5 == instr_temp ||
+ RO_V_VSSEG6 == instr_temp || RO_V_VSSEG7 == instr_temp ||
+ RO_V_VSSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vsseg%de%d.v 'vd, ('rs1)'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSSSEG2 == instr_temp || RO_V_VSSSEG3 == instr_temp ||
+ RO_V_VSSSEG4 == instr_temp || RO_V_VSSSEG5 == instr_temp ||
+ RO_V_VSSSEG6 == instr_temp || RO_V_VSSSEG7 == instr_temp ||
+ RO_V_VSSSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vssseg%de%d.v 'vd, ('rs1), 'rs2'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSXSEG2 == instr_temp || RO_V_VSXSEG3 == instr_temp ||
+ RO_V_VSXSEG4 == instr_temp || RO_V_VSXSEG5 == instr_temp ||
+ RO_V_VSXSEG6 == instr_temp || RO_V_VSXSEG7 == instr_temp ||
+ RO_V_VSXSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vsxseg%dei%d.v 'vd, ('rs1), 'vs2'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ }
+}
+
// Disassemble the instruction at *instr_ptr into the output buffer.
// All instructions are one word long, except for the simulator
// pseudo-instruction stop(msg). For that one special case, we return
@@ -1849,6 +2466,9 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
case Instruction::kCBType:
DecodeCBType(instr);
break;
+ case Instruction::kVType:
+ DecodeVType(instr);
+ break;
default:
Format(instr, "UNSUPPORTED");
UNSUPPORTED_RISCV();
@@ -1882,7 +2502,6 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // RISC-V does not have the concept of a byte register.
- return "nobytereg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/deps/v8/src/diagnostics/s390/eh-frame-s390.cc b/deps/v8/src/diagnostics/s390/eh-frame-s390.cc
index 4f5994c8da..6da3095e86 100644
--- a/deps/v8/src/diagnostics/s390/eh-frame-s390.cc
+++ b/deps/v8/src/diagnostics/s390/eh-frame-s390.cc
@@ -38,7 +38,6 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
return kR0DwarfCode;
default:
UNIMPLEMENTED();
- return -1;
}
}
@@ -55,7 +54,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "sp";
default:
UNIMPLEMENTED();
- return nullptr;
}
}
diff --git a/deps/v8/src/diagnostics/system-jit-win.cc b/deps/v8/src/diagnostics/system-jit-win.cc
index c77c223183..5ca36e67e6 100644
--- a/deps/v8/src/diagnostics/system-jit-win.cc
+++ b/deps/v8/src/diagnostics/system-jit-win.cc
@@ -4,7 +4,11 @@
#include "src/diagnostics/system-jit-win.h"
-#include "include/v8.h"
+#include "include/v8-callbacks.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-script.h"
#include "src/api/api-inl.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
diff --git a/deps/v8/src/diagnostics/unwinder.cc b/deps/v8/src/diagnostics/unwinder.cc
index 68ff679595..00a5e7dbe6 100644
--- a/deps/v8/src/diagnostics/unwinder.cc
+++ b/deps/v8/src/diagnostics/unwinder.cc
@@ -6,7 +6,7 @@
#include <algorithm>
-#include "include/v8.h"
+#include "include/v8-unwinder.h"
#include "src/execution/frame-constants.h"
#include "src/execution/pointer-authentication.h"
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 2a0cf4ff02..d50767421a 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -22,36 +22,6 @@
// This has to come after windows.h.
#include <versionhelpers.h> // For IsWindows8OrGreater().
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.h b/deps/v8/src/diagnostics/unwinding-info-win64.h
index ca66437e00..bb32f49e5d 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.h
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.h
@@ -5,7 +5,9 @@
#ifndef V8_DIAGNOSTICS_UNWINDING_INFO_WIN64_H_
#define V8_DIAGNOSTICS_UNWINDING_INFO_WIN64_H_
-#include "include/v8.h"
+#include <vector>
+
+#include "include/v8-callbacks.h"
#include "include/v8config.h"
#include "src/common/globals.h"
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index 3ddb29e064..ce0a8a4b3f 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -244,8 +244,9 @@ static const InstructionDesc cmov_instructions[16] = {
{"cmovle", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
{"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}};
-static const char* const cmp_pseudo_op[8] = {"eq", "lt", "le", "unord",
- "neq", "nlt", "nle", "ord"};
+static const char* const cmp_pseudo_op[16] = {
+ "eq", "lt", "le", "unord", "neq", "nlt", "nle", "ord",
+ "eq_uq", "nge", "ngt", "false", "neq_oq", "ge", "gt", "true"};
namespace {
int8_t Imm8(const uint8_t* data) {
@@ -279,6 +280,10 @@ int64_t Imm64(const uint8_t* data) {
//------------------------------------------------------------------------------
// DisassemblerX64 implementation.
+// Forward-declare NameOfYMMRegister to keep its implementation with the
+// NameConverter methods and register name arrays at bottom.
+const char* NameOfYMMRegister(int reg);
+
// A new DisassemblerX64 object is created to disassemble each instruction.
// The object can only disassemble a single instruction.
class DisassemblerX64 {
@@ -356,6 +361,12 @@ class DisassemblerX64 {
return (checked & 4) == 0;
}
+ bool vex_256() const {
+ DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
+ byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
+ return (checked & 4) != 0;
+ }
+
bool vex_none() {
DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
@@ -424,6 +435,14 @@ class DisassemblerX64 {
return converter_.NameOfXMMRegister(reg);
}
+ const char* NameOfAVXRegister(int reg) const {
+ if (vex_256()) {
+ return NameOfYMMRegister(reg);
+ } else {
+ return converter_.NameOfXMMRegister(reg);
+ }
+ }
+
const char* NameOfAddress(byte* addr) const {
return converter_.NameOfAddress(addr);
}
@@ -448,6 +467,7 @@ class DisassemblerX64 {
int PrintRightOperand(byte* modrmp);
int PrintRightByteOperand(byte* modrmp);
int PrintRightXMMOperand(byte* modrmp);
+ int PrintRightAVXOperand(byte* modrmp);
int PrintOperands(const char* mnem, OperandType op_order, byte* data);
int PrintImmediate(byte* data, OperandSize size);
int PrintImmediateOp(byte* data);
@@ -606,6 +626,10 @@ int DisassemblerX64::PrintRightXMMOperand(byte* modrmp) {
return PrintRightOperandHelper(modrmp, &DisassemblerX64::NameOfXMMRegister);
}
+int DisassemblerX64::PrintRightAVXOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp, &DisassemblerX64::NameOfAVXRegister);
+}
+
// Returns number of bytes used including the current *data.
// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
int DisassemblerX64::PrintOperands(const char* mnem, OperandType op_order,
@@ -866,78 +890,98 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x18:
- AppendToBuffer("vbroadcastss %s,", NameOfXMMRegister(regop));
+ AppendToBuffer("vbroadcastss %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
+ break;
+ case 0x98:
+ AppendToBuffer("vfmadd132p%c %s,%s,", float_size_code(),
+ NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0x99:
AppendToBuffer("vfmadd132s%c %s,%s,", float_size_code(),
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ break;
+ case 0xA8:
+ AppendToBuffer("vfmadd213p%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0xA9:
AppendToBuffer("vfmadd213s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xB8:
AppendToBuffer("vfmadd231p%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xB9:
AppendToBuffer("vfmadd231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x9B:
AppendToBuffer("vfmsub132s%c %s,%s,", float_size_code(),
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ break;
+ case 0x9C:
+ AppendToBuffer("vfnmadd132p%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0xAB:
AppendToBuffer("vfmsub213s%c %s,%s,", float_size_code(),
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ break;
+ case 0xAC:
+ AppendToBuffer("vfnmadd213p%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0xBB:
AppendToBuffer("vfmsub231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xBC:
AppendToBuffer("vfnmadd231p%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x9D:
AppendToBuffer("vfnmadd132s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xAD:
AppendToBuffer("vfnmadd213s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xBD:
AppendToBuffer("vfnmadd231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x9F:
AppendToBuffer("vfnmsub132s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xAF:
AppendToBuffer("vfnmsub213s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xBF:
AppendToBuffer("vfnmsub231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xF7:
AppendToBuffer("shlx%c %s,", operand_size_code(),
@@ -948,9 +992,9 @@ int DisassemblerX64::AVXInstruction(byte* data) {
#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, notUsed3, \
opcode) \
case 0x##opcode: { \
- AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
- NameOfXMMRegister(vvvv)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfAVXRegister(regop), \
+ NameOfAVXRegister(vvvv)); \
+ current += PrintRightAVXOperand(current); \
break; \
}
@@ -962,8 +1006,8 @@ int DisassemblerX64::AVXInstruction(byte* data) {
#define DECLARE_SSE_UNOP_AVX_DIS_CASE(instruction, notUsed1, notUsed2, \
notUsed3, opcode) \
case 0x##opcode: { \
- AppendToBuffer("v" #instruction " %s,", NameOfXMMRegister(regop)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,", NameOfAVXRegister(regop)); \
+ current += PrintRightAVXOperand(current); \
break; \
}
SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSE_UNOP_AVX_DIS_CASE)
@@ -972,8 +1016,8 @@ int DisassemblerX64::AVXInstruction(byte* data) {
#define DISASSEMBLE_AVX2_BROADCAST(instruction, _1, _2, _3, code) \
case 0x##code: \
- AppendToBuffer("" #instruction " %s,", NameOfXMMRegister(regop)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("" #instruction " %s,", NameOfAVXRegister(regop)); \
+ current += PrintRightAVXOperand(current); \
break;
AVX2_BROADCAST_LIST(DISASSEMBLE_AVX2_BROADCAST)
#undef DISASSEMBLE_AVX2_BROADCAST
@@ -986,96 +1030,96 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x08:
- AppendToBuffer("vroundps %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vroundps %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x09:
- AppendToBuffer("vroundpd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vroundpd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x0A:
- AppendToBuffer("vroundss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vroundss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x0B:
- AppendToBuffer("vroundsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vroundsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x0E:
- AppendToBuffer("vpblendw %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpblendw %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x0F:
- AppendToBuffer("vpalignr %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpalignr %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x14:
AppendToBuffer("vpextrb ");
current += PrintRightByteOperand(current);
- AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ AppendToBuffer(",%s,0x%x,", NameOfAVXRegister(regop), *current++);
break;
case 0x15:
AppendToBuffer("vpextrw ");
current += PrintRightOperand(current);
- AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ AppendToBuffer(",%s,0x%x,", NameOfAVXRegister(regop), *current++);
break;
case 0x16:
AppendToBuffer("vpextr%c ", rex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
- AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ AppendToBuffer(",%s,0x%x,", NameOfAVXRegister(regop), *current++);
break;
case 0x17:
AppendToBuffer("vextractps ");
current += PrintRightOperand(current);
- AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ AppendToBuffer(",%s,0x%x,", NameOfAVXRegister(regop), *current++);
break;
case 0x20:
- AppendToBuffer("vpinsrb %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
+ AppendToBuffer("vpinsrb %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
current += PrintRightByteOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x21:
- AppendToBuffer("vinsertps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vinsertps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x22:
AppendToBuffer("vpinsr%c %s,%s,", rex_w() ? 'q' : 'd',
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
current += PrintRightOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x4A: {
- AppendToBuffer("vblendvps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
+ AppendToBuffer("vblendvps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister((*current++) >> 4));
break;
}
case 0x4B: {
- AppendToBuffer("vblendvpd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
+ AppendToBuffer("vblendvpd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister((*current++) >> 4));
break;
}
case 0x4C: {
- AppendToBuffer("vpblendvb %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
+ AppendToBuffer("vpblendvb %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister((*current++) >> 4));
break;
}
default:
@@ -1086,95 +1130,95 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x10:
- AppendToBuffer("vmovss %s,", NameOfXMMRegister(regop));
+ AppendToBuffer("vmovss %s,", NameOfAVXRegister(regop));
if (mod == 3) {
- AppendToBuffer("%s,", NameOfXMMRegister(vvvv));
+ AppendToBuffer("%s,", NameOfAVXRegister(vvvv));
}
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x11:
AppendToBuffer("vmovss ");
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
if (mod == 3) {
- AppendToBuffer(",%s", NameOfXMMRegister(vvvv));
+ AppendToBuffer(",%s", NameOfAVXRegister(vvvv));
}
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x16:
- AppendToBuffer("vmovshdup %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovshdup %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x2A:
AppendToBuffer("%s %s,%s,", vex_w() ? "vcvtqsi2ss" : "vcvtlsi2ss",
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
current += PrintRightOperand(current);
break;
case 0x2C:
AppendToBuffer("vcvttss2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x51:
- AppendToBuffer("vsqrtss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vsqrtss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x58:
- AppendToBuffer("vaddss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vaddss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x59:
- AppendToBuffer("vmulss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmulss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5A:
- AppendToBuffer("vcvtss2sd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcvtss2sd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5B:
- AppendToBuffer("vcvttps2dq %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcvttps2dq %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x5C:
- AppendToBuffer("vsubss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vsubss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5D:
- AppendToBuffer("vminss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vminss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5E:
- AppendToBuffer("vdivss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vdivss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5F:
- AppendToBuffer("vmaxss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmaxss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x6F:
- AppendToBuffer("vmovdqu %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovdqu %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x70:
- AppendToBuffer("vpshufhw %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpshufhw %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x7F:
AppendToBuffer("vmovdqu ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0xE6:
- AppendToBuffer("vcvtdq2pd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcvtdq2pd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
default:
UnimplementedInstruction();
@@ -1184,92 +1228,92 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x10:
- AppendToBuffer("vmovsd %s,", NameOfXMMRegister(regop));
+ AppendToBuffer("vmovsd %s,", NameOfAVXRegister(regop));
if (mod == 3) {
- AppendToBuffer("%s,", NameOfXMMRegister(vvvv));
+ AppendToBuffer("%s,", NameOfAVXRegister(vvvv));
}
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x11:
AppendToBuffer("vmovsd ");
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
if (mod == 3) {
- AppendToBuffer(",%s", NameOfXMMRegister(vvvv));
+ AppendToBuffer(",%s", NameOfAVXRegister(vvvv));
}
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x12:
- AppendToBuffer("vmovddup %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovddup %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x2A:
AppendToBuffer("%s %s,%s,", vex_w() ? "vcvtqsi2sd" : "vcvtlsi2sd",
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
current += PrintRightOperand(current);
break;
case 0x2C:
AppendToBuffer("vcvttsd2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x2D:
AppendToBuffer("vcvtsd2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x51:
- AppendToBuffer("vsqrtsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vsqrtsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x58:
- AppendToBuffer("vaddsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vaddsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x59:
- AppendToBuffer("vmulsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmulsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5A:
- AppendToBuffer("vcvtsd2ss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcvtsd2ss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5C:
- AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vsubsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5D:
- AppendToBuffer("vminsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vminsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5E:
- AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vdivsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5F:
- AppendToBuffer("vmaxsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmaxsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xF0:
- AppendToBuffer("vlddqu %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vlddqu %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x70:
- AppendToBuffer("vpshuflw %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpshuflw %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x7C:
- AppendToBuffer("vhaddps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vhaddps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
default:
UnimplementedInstruction();
@@ -1387,90 +1431,90 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x10:
- AppendToBuffer("vmovups %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovups %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x11:
AppendToBuffer("vmovups ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x12:
if (mod == 0b11) {
- AppendToBuffer("vmovhlps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovhlps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
} else {
- AppendToBuffer("vmovlps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovlps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
}
break;
case 0x13:
AppendToBuffer("vmovlps ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x16:
if (mod == 0b11) {
- AppendToBuffer("vmovlhps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovlhps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
} else {
- AppendToBuffer("vmovhps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovhps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
}
break;
case 0x17:
AppendToBuffer("vmovhps ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x28:
- AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovaps %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x29:
AppendToBuffer("vmovaps ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x2E:
- AppendToBuffer("vucomiss %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vucomiss %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x50:
AppendToBuffer("vmovmskps %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0xC2: {
- AppendToBuffer("vcmpps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcmpps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(", (%s)", cmp_pseudo_op[*current]);
current += 1;
break;
}
case 0xC6: {
- AppendToBuffer("vshufps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vshufps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
}
#define SSE_UNOP_CASE(instruction, unused, code) \
case 0x##code: \
- AppendToBuffer("v" #instruction " %s,", NameOfXMMRegister(regop)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,", NameOfAVXRegister(regop)); \
+ current += PrintRightAVXOperand(current); \
break;
SSE_UNOP_INSTRUCTION_LIST(SSE_UNOP_CASE)
#undef SSE_UNOP_CASE
#define SSE_BINOP_CASE(instruction, unused, code) \
case 0x##code: \
- AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
- NameOfXMMRegister(vvvv)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfAVXRegister(regop), \
+ NameOfAVXRegister(vvvv)); \
+ current += PrintRightAVXOperand(current); \
break;
SSE_BINOP_INSTRUCTION_LIST(SSE_BINOP_CASE)
#undef SSE_BINOP_CASE
@@ -1482,92 +1526,92 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x10:
- AppendToBuffer("vmovupd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovupd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x11:
AppendToBuffer("vmovupd ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x28:
- AppendToBuffer("vmovapd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovapd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x29:
AppendToBuffer("vmovapd ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x50:
AppendToBuffer("vmovmskpd %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x6E:
AppendToBuffer("vmov%c %s,", vex_w() ? 'q' : 'd',
- NameOfXMMRegister(regop));
+ NameOfAVXRegister(regop));
current += PrintRightOperand(current);
break;
case 0x6F:
- AppendToBuffer("vmovdqa %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovdqa %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x70:
- AppendToBuffer("vpshufd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpshufd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x71:
AppendToBuffer("vps%sw %s,", sf_str[regop / 2],
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",%u", *current++);
break;
case 0x72:
AppendToBuffer("vps%sd %s,", sf_str[regop / 2],
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",%u", *current++);
break;
case 0x73:
AppendToBuffer("vps%sq %s,", sf_str[regop / 2],
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",%u", *current++);
break;
case 0x7E:
AppendToBuffer("vmov%c ", vex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0xC2: {
- AppendToBuffer("vcmppd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcmppd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(", (%s)", cmp_pseudo_op[*current]);
current += 1;
break;
}
case 0xC4:
- AppendToBuffer("vpinsrw %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
+ AppendToBuffer("vpinsrw %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
current += PrintRightOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0xC5:
AppendToBuffer("vpextrw %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0xD7:
AppendToBuffer("vpmovmskb %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
case 0x##opcode: { \
- AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
- NameOfXMMRegister(vvvv)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfAVXRegister(regop), \
+ NameOfAVXRegister(vvvv)); \
+ current += PrintRightAVXOperand(current); \
break; \
}
@@ -1575,8 +1619,8 @@ int DisassemblerX64::AVXInstruction(byte* data) {
#undef DECLARE_SSE_AVX_DIS_CASE
#define DECLARE_SSE_UNOP_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
case 0x##opcode: { \
- AppendToBuffer("v" #instruction " %s,", NameOfXMMRegister(regop)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,", NameOfAVXRegister(regop)); \
+ current += PrintRightAVXOperand(current); \
break; \
}
@@ -2823,6 +2867,10 @@ static const char* const xmm_regs[16] = {
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"};
+static const char* const ymm_regs[16] = {
+ "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7",
+ "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15"};
+
const char* NameConverter::NameOfAddress(byte* addr) const {
v8::base::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
return tmp_buffer_.begin();
@@ -2847,6 +2895,11 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
return "noxmmreg";
}
+const char* NameOfYMMRegister(int reg) {
+ if (0 <= reg && reg < 16) return ymm_regs[reg];
+ return "noymmreg";
+}
+
const char* NameConverter::NameInCode(byte* addr) const {
// X64 does not embed debug strings at the moment.
UNREACHABLE();
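
The hunks above switch the packed-double AVX paths from NameOfXMMRegister/PrintRightXMMOperand to AVX-specific helpers and add a ymm_regs table plus NameOfYMMRegister, so VEX-encoded operands can be printed with 256-bit aliases. Below is a minimal standalone sketch of the selection such a helper can make; the function name, the vex_l parameter and the driver are illustrative assumptions, not the V8 implementation.

    // Illustrative sketch only (assumed names; not the V8 disassembler):
    // choose between the xmm and ymm alias tables for an AVX operand based on
    // the VEX.L bit (0 = 128-bit operand, 1 = 256-bit operand).
    #include <cstdio>

    static const char* const kXmmNames[16] = {
        "xmm0", "xmm1", "xmm2",  "xmm3",  "xmm4",  "xmm5",  "xmm6",  "xmm7",
        "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"};
    static const char* const kYmmNames[16] = {
        "ymm0", "ymm1", "ymm2",  "ymm3",  "ymm4",  "ymm5",  "ymm6",  "ymm7",
        "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15"};

    const char* NameOfAvxRegisterSketch(int reg, bool vex_l) {
      if (reg < 0 || reg >= 16) return vex_l ? "noymmreg" : "noxmmreg";
      return vex_l ? kYmmNames[reg] : kXmmNames[reg];
    }

    int main() {
      std::printf("%s %s\n", NameOfAvxRegisterSketch(3, false),
                  NameOfAvxRegisterSketch(3, true));  // prints "xmm3 ymm3"
    }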
diff --git a/deps/v8/src/execution/OWNERS b/deps/v8/src/execution/OWNERS
index 1a987f65e7..921f4f742a 100644
--- a/deps/v8/src/execution/OWNERS
+++ b/deps/v8/src/execution/OWNERS
@@ -1,7 +1,6 @@
ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
-mythria@chromium.org
delphick@chromium.org
verwaest@chromium.org
victorgomes@chromium.org
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index ec9c05af69..310ddab523 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -114,14 +114,10 @@ bool ArmDebugger::GetValue(const char* desc, int32_t* value) {
if (regnum != kNoRegister) {
*value = GetRegisterValue(regnum);
return true;
- } else {
- if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc + 2, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
- } else {
- return SScanF(desc, "%u", reinterpret_cast<uint32_t*>(value)) == 1;
- }
}
- return false;
+ if (strncmp(desc, "0x", 2) == 0)
+ return SScanF(desc + 2, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
+ return SScanF(desc, "%u", reinterpret_cast<uint32_t*>(value)) == 1;
}
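
The restructured ArmDebugger::GetValue keeps the same three-way parse: a register name first, then a 0x-prefixed hex literal, then a plain unsigned decimal. A self-contained sketch of that pattern follows; the register lookup is an assumed stand-in, since the real debugger consults the simulated register file.

    // Standalone sketch of the three-way parse (assumed register lookup;
    // not the debugger code).
    #include <cassert>
    #include <cstdio>
    #include <cstring>

    bool ParseDebuggerValue(const char* desc, unsigned* value) {
      if (std::strcmp(desc, "r0") == 0) {  // assumed stand-in register lookup
        *value = 42;
        return true;
      }
      if (std::strncmp(desc, "0x", 2) == 0)
        return std::sscanf(desc + 2, "%x", value) == 1;
      return std::sscanf(desc, "%u", value) == 1;
    }

    int main() {
      unsigned v = 0;
      assert(ParseDebuggerValue("r0", &v) && v == 42);
      assert(ParseDebuggerValue("0x1f", &v) && v == 0x1f);
      assert(ParseDebuggerValue("123", &v) && v == 123);
    }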
bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
@@ -1192,7 +1188,6 @@ bool Simulator::ConditionallyExecute(Instruction* instr) {
default:
UNREACHABLE();
}
- return false;
}
// Calculate and set the Negative and Zero flags.
@@ -1314,7 +1309,6 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
// by immediate
if ((shift == ROR) && (shift_amount == 0)) {
UNIMPLEMENTED();
- return result;
} else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
shift_amount = 32;
}
@@ -1373,7 +1367,6 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
default: {
UNREACHABLE();
- break;
}
}
} else {
@@ -1451,7 +1444,6 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
default: {
UNREACHABLE();
- break;
}
}
}
@@ -1486,7 +1478,6 @@ int32_t Simulator::ProcessPU(Instruction* instr, int num_regs, int reg_size,
switch (instr->PUField()) {
case da_x: {
UNIMPLEMENTED();
- break;
}
case ia_x: {
*start_address = rn_val;
@@ -1717,7 +1708,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
@@ -1769,7 +1759,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
switch (redirection->type()) {
@@ -1783,7 +1772,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -2121,7 +2109,6 @@ void Simulator::DecodeType01(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
} else {
// The instruction is documented as strex rd, rt, [rn], but the
@@ -2165,7 +2152,6 @@ void Simulator::DecodeType01(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
}
} else {
@@ -2219,7 +2205,6 @@ void Simulator::DecodeType01(Instruction* instr) {
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
- break;
}
}
} else {
@@ -2262,7 +2247,6 @@ void Simulator::DecodeType01(Instruction* instr) {
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
- break;
}
}
}
@@ -2600,7 +2584,6 @@ void Simulator::DecodeType01(Instruction* instr) {
default: {
UNREACHABLE();
- break;
}
}
}
@@ -2680,7 +2663,6 @@ void Simulator::DecodeType3(Instruction* instr) {
DCHECK(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
UNIMPLEMENTED();
- break;
}
case ia_x: {
if (instr->Bit(4) == 0) {
@@ -2714,10 +2696,8 @@ void Simulator::DecodeType3(Instruction* instr) {
break;
case 1:
UNIMPLEMENTED();
- break;
case 2:
UNIMPLEMENTED();
- break;
case 3: {
// Usat.
int32_t sat_pos = instr->Bits(20, 16);
@@ -2746,7 +2726,6 @@ void Simulator::DecodeType3(Instruction* instr) {
switch (instr->Bits(22, 21)) {
case 0:
UNIMPLEMENTED();
- break;
case 1:
if (instr->Bits(9, 6) == 1) {
if (instr->Bit(20) == 0) {
@@ -3442,7 +3421,6 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
set_neon_register(vd, q_data);
}
@@ -4433,7 +4411,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
break;
}
@@ -4469,13 +4446,11 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
break;
}
default:
UNREACHABLE();
- break;
}
} else if (opc1 == 0 && (opc2 == 0b0100 || opc2 == 0b0101)) {
DCHECK_EQ(1, instr->Bit(6)); // Only support Q regs.
@@ -4625,7 +4600,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
}
} else if (opc1 == 0b01 && (opc2 & 0b0111) == 0b111) {
@@ -4654,7 +4628,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
}
} else if (opc1 == 0b10 && opc2 == 0b0001) {
@@ -4674,7 +4647,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
int Vd = instr->VFPDRegValue(kDoublePrecision);
@@ -4692,7 +4664,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (opc1 == 0b10 && (opc2 & 0b1110) == 0b0010) {
@@ -4714,7 +4685,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
// vuzp.<size> Qd, Qm.
@@ -4730,7 +4700,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else {
@@ -4747,10 +4716,8 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
case Neon32:
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
- break;
}
} else {
// vuzp.<size> Dd, Dm.
@@ -4763,10 +4730,8 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
case Neon32:
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
- break;
}
}
}
@@ -4811,7 +4776,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
}
case Neon64:
UNREACHABLE();
- break;
}
} else if (opc1 == 0b10 && instr->Bit(10) == 1) {
// vrint<q>.<dt> <Dd>, <Dm>
@@ -5078,7 +5042,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 1 && sz == 2 && q && op1) {
// vmov Qd, Qm.
@@ -5134,7 +5097,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 3) {
// vcge/vcgt.s<size> Qd, Qm, Qn.
@@ -5152,7 +5114,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 4 && !op1) {
// vshl s<size> Qd, Qm, Qn.
@@ -5172,7 +5133,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 6) {
// vmin/vmax.s<size> Qd, Qm, Qn.
@@ -5190,7 +5150,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 8 && op1) {
// vtst.i<size> Qd, Qm, Qn.
@@ -5207,7 +5166,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 8 && !op1) {
// vadd.i<size> Qd, Qm, Qn.
@@ -5241,7 +5199,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 0xA) {
// vpmin/vpmax.s<size> Dd, Dm, Dn.
@@ -5259,7 +5216,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 0xB) {
// vpadd.i<size> Dd, Dm, Dn.
@@ -5276,7 +5232,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 0xD && !op1) {
float src1[4], src2[4];
@@ -5347,7 +5302,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 1 && sz == 1 && op1) {
// vbsl.size Qd, Qm, Qn.
@@ -5388,7 +5342,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 2 && op1) {
// vqsub.u<size> Qd, Qm, Qn.
@@ -5405,7 +5358,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 3) {
// vcge/vcgt.u<size> Qd, Qm, Qn.
@@ -5423,7 +5375,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 4 && !op1) {
// vshl u<size> Qd, Qm, Qn.
@@ -5443,7 +5394,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 6) {
// vmin/vmax.u<size> Qd, Qm, Qn.
@@ -5461,7 +5411,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 8 && !op1) {
// vsub.size Qd, Qm, Qn.
@@ -5495,7 +5444,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 0xA) {
// vpmin/vpmax.u<size> Dd, Dm, Dn.
@@ -5513,7 +5461,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 0xD && sz == 0 && q && op1) {
// vmul.f32 Qd, Qn, Qm
@@ -5658,7 +5605,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
} else {
// vmovl signed
@@ -5677,7 +5623,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
}
} else if (!u && imm3H_L != 0 && opc == 0b0101) {
@@ -5721,7 +5666,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && imm3H_L != 0 && opc == 0b0101) {
// vsli.<size> Dd, Dm, shift
@@ -5743,7 +5687,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
}
@@ -5807,7 +5750,6 @@ void Simulator::DecodeAdvancedSIMDLoadStoreMultipleStructures(
break;
default:
UNIMPLEMENTED();
- break;
}
if (instr->Bit(21)) {
// vld1
@@ -5993,7 +5935,6 @@ void Simulator::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
@@ -6019,7 +5960,6 @@ void Simulator::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@@ -6111,7 +6051,6 @@ void Simulator::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
if (instr->SzValue() == 0x1) {
int n = instr->VFPNRegValue(kDoublePrecision);
@@ -6132,7 +6071,6 @@ void Simulator::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
}
@@ -6201,7 +6139,6 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
default: {
UNIMPLEMENTED();
- break;
}
}
}
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index 324bdd99a8..5669838006 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -1517,7 +1517,6 @@ void Simulator::VisitPCRelAddressing(Instruction* instr) {
break;
case ADRP: // Not implemented in the assembler.
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
}
@@ -2212,7 +2211,6 @@ Simulator::TransactionSize Simulator::get_transaction_size(unsigned size) {
default:
UNREACHABLE();
}
- return TransactionSize::None;
}
void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
@@ -5210,7 +5208,6 @@ void Simulator::VisitNEONScalar2RegMisc(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
} else {
VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
diff --git a/deps/v8/src/execution/execution.cc b/deps/v8/src/execution/execution.cc
index 4b7b50bb0e..689d99057e 100644
--- a/deps/v8/src/execution/execution.cc
+++ b/deps/v8/src/execution/execution.cc
@@ -346,7 +346,6 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
// Placeholder for return value.
Object value;
-
Handle<Code> code =
JSEntry(isolate, params.execution_target, params.is_construct);
{
@@ -374,7 +373,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
Address** argv = reinterpret_cast<Address**>(params.argv);
RCS_SCOPE(isolate, RuntimeCallCounterId::kJS_Execution);
value = Object(stub_entry.Call(isolate->isolate_data()->isolate_root(),
- orig_func, func, recv, params.argc, argv));
+ orig_func, func, recv,
+ JSParameterCount(params.argc), argv));
} else {
DCHECK_EQ(Execution::Target::kRunMicrotasks, params.execution_target);
diff --git a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h
index 1148a94212..d353a7092d 100644
--- a/deps/v8/src/execution/frame-constants.h
+++ b/deps/v8/src/execution/frame-constants.h
@@ -283,7 +283,9 @@ class BuiltinExitFrameConstants : public ExitFrameConstants {
static constexpr int kPaddingOffset = kArgcOffset + 1 * kSystemPointerSize;
static constexpr int kFirstArgumentOffset =
kPaddingOffset + 1 * kSystemPointerSize;
- static constexpr int kNumExtraArgsWithReceiver = 5;
+ static constexpr int kNumExtraArgsWithoutReceiver = 4;
+ static constexpr int kNumExtraArgsWithReceiver =
+ kNumExtraArgsWithoutReceiver + 1;
};
// Unoptimized frames are used for interpreted and baseline-compiled JavaScript
@@ -403,6 +405,8 @@ inline static int FrameSlotToFPOffset(int slot) {
#include "src/execution/mips/frame-constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/execution/mips64/frame-constants-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/execution/loong64/frame-constants-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/execution/s390/frame-constants-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index f24f183706..a388130ee3 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -206,19 +206,42 @@ int StackTraceFrameIterator::FrameFunctionCount() const {
return static_cast<int>(infos.size());
}
-bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) const {
+FrameSummary StackTraceFrameIterator::GetTopValidFrame() const {
+ DCHECK(!done());
+ // Like FrameSummary::GetTop, but additionally observes
+ // StackTraceFrameIterator filtering semantics.
+ std::vector<FrameSummary> frames;
+ frame()->Summarize(&frames);
+ if (is_javascript()) {
+ for (int i = static_cast<int>(frames.size()) - 1; i >= 0; i--) {
+ if (!IsValidJSFunction(*frames[i].AsJavaScript().function())) continue;
+ return frames[i];
+ }
+ UNREACHABLE();
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (is_wasm()) return frames.back();
+#endif // V8_ENABLE_WEBASSEMBLY
+ UNREACHABLE();
+}
+
+// static
+bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) {
if (frame->is_java_script()) {
- JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
- if (!js_frame->function().IsJSFunction()) return false;
- return js_frame->function().shared().IsSubjectToDebugging();
+ return IsValidJSFunction(static_cast<JavaScriptFrame*>(frame)->function());
}
- // Apart from JavaScript frames, only Wasm frames are valid.
#if V8_ENABLE_WEBASSEMBLY
if (frame->is_wasm()) return true;
#endif // V8_ENABLE_WEBASSEMBLY
return false;
}
+// static
+bool StackTraceFrameIterator::IsValidJSFunction(JSFunction f) {
+ if (!f.IsJSFunction()) return false;
+ return f.shared().IsSubjectToDebugging();
+}
+
// -------------------------------------------------------------------------
namespace {
@@ -1154,7 +1177,8 @@ int OptimizedFrame::ComputeParametersCount() const {
Code code = LookupCode();
if (code.kind() == CodeKind::BUILTIN) {
return static_cast<int>(
- Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset));
+ Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset)) -
+ kJSArgcReceiverSlots;
} else {
return JavaScriptFrame::ComputeParametersCount();
}
@@ -1327,12 +1351,13 @@ Object CommonFrameWithJSLinkage::GetParameter(int index) const {
int CommonFrameWithJSLinkage::ComputeParametersCount() const {
DCHECK(can_access_heap_objects() &&
isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
- return function().shared().internal_formal_parameter_count();
+ return function().shared().internal_formal_parameter_count_without_receiver();
}
int JavaScriptFrame::GetActualArgumentCount() const {
return static_cast<int>(
- Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset));
+ Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset)) -
+ kJSArgcReceiverSlots;
}
Handle<FixedArray> CommonFrameWithJSLinkage::GetParameters() const {
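
Both subtractions of kJSArgcReceiverSlots above, like the JSParameterCount wrapper in execution.cc earlier in this patch, reflect one convention change: the frame's argc slot now counts the receiver, so frame readers subtract one slot to recover the JavaScript-visible argument count. A minimal sketch of the arithmetic, with the constant's value assumed here rather than taken from the V8 headers:

    // Assumed value: one stack slot for the receiver.
    #include <cassert>

    constexpr int kJSArgcReceiverSlots = 1;

    constexpr int JSParameterCount(int argc_without_receiver) {
      return argc_without_receiver + kJSArgcReceiverSlots;
    }

    int main() {
      int stored_argc = JSParameterCount(2);            // f(a, b): slot holds 3
      assert(stored_argc - kJSArgcReceiverSlots == 2);  // frame readers see 2
    }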
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index 8d9dadd76d..d81a9dd878 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_FRAMES_H_
#define V8_EXECUTION_FRAMES_H_
+#include "include/v8-initialization.h"
#include "src/base/bounds.h"
#include "src/codegen/safepoint-table.h"
#include "src/common/globals.h"
@@ -175,7 +176,9 @@ class StackFrame {
intptr_t type = marker >> kSmiTagSize;
// TODO(petermarshall): There is a bug in the arm simulators that causes
// invalid frame markers.
-#if defined(USE_SIMULATOR) && (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM)
+#if (defined(USE_SIMULATOR) && \
+ (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM)) || \
+ V8_TARGET_ARCH_RISCV64
if (static_cast<uintptr_t>(type) >= Type::NUMBER_OF_TYPES) {
// Appease UBSan.
return Type::NUMBER_OF_TYPES;
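
The marker decoding above strips the Smi tag from the on-stack frame marker and clamps out-of-range values before they are used as an enum; the widened #if simply extends that clamp to RISC-V builds. A standalone sketch of the decode-and-clamp step, with an assumed tag width and a stand-in enum size rather than the V8 constants:

    // Assumed values: a one-bit Smi tag and a stand-in enum size.
    #include <cassert>
    #include <cstdint>

    constexpr int kSmiTagSizeSketch = 1;
    constexpr uintptr_t kNumberOfTypesSketch = 30;

    uintptr_t DecodeFrameMarker(intptr_t marker) {
      intptr_t type = marker >> kSmiTagSizeSketch;  // strip the Smi tag bit
      if (static_cast<uintptr_t>(type) >= kNumberOfTypesSketch)
        return kNumberOfTypesSketch;  // clamp invalid markers
      return static_cast<uintptr_t>(type);
    }

    int main() {
      assert(DecodeFrameMarker(4 << kSmiTagSizeSketch) == 4);  // valid marker
      assert(DecodeFrameMarker(static_cast<intptr_t>(
                 (kNumberOfTypesSketch + 5) << kSmiTagSizeSketch)) ==
             kNumberOfTypesSketch);  // out-of-range marker is clamped
    }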
@@ -1273,9 +1276,14 @@ class V8_EXPORT_PRIVATE StackTraceFrameIterator {
#endif // V8_ENABLE_WEBASSEMBLY
inline JavaScriptFrame* javascript_frame() const;
+ // Use this instead of FrameSummary::GetTop(javascript_frame) to keep
+ // filtering behavior consistent with the rest of StackTraceFrameIterator.
+ FrameSummary GetTopValidFrame() const;
+
private:
StackFrameIterator iterator_;
- bool IsValidFrame(StackFrame* frame) const;
+ static bool IsValidFrame(StackFrame* frame);
+ static bool IsValidJSFunction(JSFunction f);
};
class SafeStackFrameIterator : public StackFrameIteratorBase {
diff --git a/deps/v8/src/execution/futex-emulation.h b/deps/v8/src/execution/futex-emulation.h
index cf8a9fd079..2ab84295e0 100644
--- a/deps/v8/src/execution/futex-emulation.h
+++ b/deps/v8/src/execution/futex-emulation.h
@@ -9,7 +9,7 @@
#include <map>
-#include "include/v8.h"
+#include "include/v8-persistent-handle.h"
#include "src/base/atomicops.h"
#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
@@ -29,6 +29,8 @@
namespace v8 {
+class Promise;
+
namespace base {
class TimeDelta;
} // namespace base
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 8363c52c49..c630cb73fa 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -14,6 +14,7 @@
#include <unordered_map>
#include <utility>
+#include "include/v8-template.h"
#include "src/api/api-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
@@ -151,26 +152,6 @@ uint32_t DefaultEmbeddedBlobDataSize() {
return v8_Default_embedded_blob_data_size_;
}
-#ifdef V8_MULTI_SNAPSHOTS
-extern "C" const uint8_t* v8_Trusted_embedded_blob_code_;
-extern "C" uint32_t v8_Trusted_embedded_blob_code_size_;
-extern "C" const uint8_t* v8_Trusted_embedded_blob_data_;
-extern "C" uint32_t v8_Trusted_embedded_blob_data_size_;
-
-const uint8_t* TrustedEmbeddedBlobCode() {
- return v8_Trusted_embedded_blob_code_;
-}
-uint32_t TrustedEmbeddedBlobCodeSize() {
- return v8_Trusted_embedded_blob_code_size_;
-}
-const uint8_t* TrustedEmbeddedBlobData() {
- return v8_Trusted_embedded_blob_data_;
-}
-uint32_t TrustedEmbeddedBlobDataSize() {
- return v8_Trusted_embedded_blob_data_size_;
-}
-#endif
-
namespace {
// These variables provide access to the current embedded blob without requiring
// an isolate instance. This is needed e.g. by Code::InstructionStart, which may
@@ -282,9 +263,6 @@ bool Isolate::CurrentEmbeddedBlobIsBinaryEmbedded() {
const uint8_t* code =
current_embedded_blob_code_.load(std::memory_order::memory_order_relaxed);
if (code == nullptr) return false;
-#ifdef V8_MULTI_SNAPSHOTS
- if (code == TrustedEmbeddedBlobCode()) return true;
-#endif
return code == DefaultEmbeddedBlobCode();
}
@@ -660,7 +638,8 @@ class StackTraceBuilder {
if (V8_UNLIKELY(FLAG_detailed_error_stack_trace)) {
parameters = isolate_->factory()->CopyFixedArrayUpTo(
handle(generator_object->parameters_and_registers(), isolate_),
- function->shared().internal_formal_parameter_count());
+ function->shared()
+ .internal_formal_parameter_count_without_receiver());
}
AppendFrame(receiver, function, code, offset, flags, parameters);
@@ -2171,20 +2150,16 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
bool Isolate::ComputeLocation(MessageLocation* target) {
StackTraceFrameIterator it(this);
if (it.done()) return false;
- CommonFrame* frame = it.frame();
// Compute the location from the function and the relocation info of the
// baseline code. For optimized code this will use the deoptimization
// information to get canonical location information.
- std::vector<FrameSummary> frames;
#if V8_ENABLE_WEBASSEMBLY
wasm::WasmCodeRefScope code_ref_scope;
#endif // V8_ENABLE_WEBASSEMBLY
- frame->Summarize(&frames);
- FrameSummary& summary = frames.back();
+ FrameSummary summary = it.GetTopValidFrame();
Handle<SharedFunctionInfo> shared;
Handle<Object> script = summary.script();
- if (!script->IsScript() ||
- (Script::cast(*script).source().IsUndefined(this))) {
+ if (!script->IsScript() || Script::cast(*script).source().IsUndefined(this)) {
return false;
}
@@ -2648,7 +2623,7 @@ Handle<Context> Isolate::GetIncumbentContext() {
// NOTE: This code assumes that the stack grows downward.
Address top_backup_incumbent =
top_backup_incumbent_scope()
- ? top_backup_incumbent_scope()->JSStackComparableAddress()
+ ? top_backup_incumbent_scope()->JSStackComparableAddressPrivate()
: 0;
if (!it.done() &&
(!top_backup_incumbent || it.frame()->sp() < top_backup_incumbent)) {
@@ -3412,15 +3387,6 @@ void Isolate::InitializeDefaultEmbeddedBlob() {
const uint8_t* data = DefaultEmbeddedBlobData();
uint32_t data_size = DefaultEmbeddedBlobDataSize();
-#ifdef V8_MULTI_SNAPSHOTS
- if (!FLAG_untrusted_code_mitigations) {
- code = TrustedEmbeddedBlobCode();
- code_size = TrustedEmbeddedBlobCodeSize();
- data = TrustedEmbeddedBlobData();
- data_size = TrustedEmbeddedBlobDataSize();
- }
-#endif
-
if (StickyEmbeddedBlobCode() != nullptr) {
base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());
// Check again now that we hold the lock.
@@ -4295,7 +4261,6 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
DCHECK(host_import_module_dynamically_callback_ == nullptr ||
host_import_module_dynamically_with_import_assertions_callback_ ==
nullptr);
-
if (host_import_module_dynamically_callback_ == nullptr &&
host_import_module_dynamically_with_import_assertions_callback_ ==
nullptr) {
@@ -4309,7 +4274,6 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
if (!maybe_specifier.ToHandle(&specifier_str)) {
Handle<Object> exception(pending_exception(), this);
clear_pending_exception();
-
return NewRejectedPromise(this, api_context, exception);
}
DCHECK(!has_pending_exception());
@@ -4331,7 +4295,6 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
} else {
Handle<Object> exception(pending_exception(), this);
clear_pending_exception();
-
return NewRejectedPromise(this, api_context, exception);
}
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index e543c72718..e7908eac6a 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -13,9 +13,11 @@
#include <unordered_map>
#include <vector>
+#include "include/v8-context.h"
#include "include/v8-internal.h"
+#include "include/v8-isolate.h"
#include "include/v8-metrics.h"
-#include "include/v8.h"
+#include "include/v8-snapshot.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/builtins/builtins.h"
@@ -33,6 +35,7 @@
#include "src/heap/heap.h"
#include "src/heap/read-only-heap.h"
#include "src/init/isolate-allocator.h"
+#include "src/init/vm-cage.h"
#include "src/objects/code.h"
#include "src/objects/contexts.h"
#include "src/objects/debug-objects.h"
@@ -91,6 +94,7 @@ class EternalHandles;
class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
+class GlobalHandles;
class InnerPointerToCodeCache;
class LazyCompileDispatcher;
class LocalIsolate;
diff --git a/deps/v8/src/execution/loong64/frame-constants-loong64.cc b/deps/v8/src/execution/loong64/frame-constants-loong64.cc
new file mode 100644
index 0000000000..4bd809266c
--- /dev/null
+++ b/deps/v8/src/execution/loong64/frame-constants-loong64.cc
@@ -0,0 +1,32 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/execution/loong64/frame-constants-loong64.h"
+
+#include "src/codegen/loong64/assembler-loong64-inl.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
+
+int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) {
+ return register_count;
+}
+
+int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
+ USE(register_count);
+ return 0;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/execution/loong64/frame-constants-loong64.h b/deps/v8/src/execution/loong64/frame-constants-loong64.h
new file mode 100644
index 0000000000..1395f47a7b
--- /dev/null
+++ b/deps/v8/src/execution/loong64/frame-constants-loong64.h
@@ -0,0 +1,76 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_
+#define V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_
+
+#include "src/base/bits.h"
+#include "src/base/macros.h"
+#include "src/execution/frame-constants.h"
+
+namespace v8 {
+namespace internal {
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ // This is the offset to where JSEntry pushes the current value of
+ // Isolate::c_entry_fp onto the stack.
+ static constexpr int kCallerFPOffset = -3 * kSystemPointerSize;
+};
+
+class WasmCompileLazyFrameConstants : public TypedFrameConstants {
+ public:
+ static constexpr int kNumberOfSavedGpParamRegs = 7;
+ static constexpr int kNumberOfSavedFpParamRegs = 8;
+ static constexpr int kNumberOfSavedAllParamRegs = 15;
+
+ // FP-relative.
+ // See Generate_WasmCompileLazy in builtins-loong64.cc.
+ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(6);
+ static constexpr int kFixedFrameSizeFromFp =
+ TypedFrameConstants::kFixedFrameSizeFromFp +
+ kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedFpParamRegs * kDoubleSize;
+};
+
+// Frame constructed by the {WasmDebugBreak} builtin.
+// After pushing the frame type marker, the builtin pushes all Liftoff cache
+// registers (see liftoff-assembler-defs.h).
+class WasmDebugBreakFrameConstants : public TypedFrameConstants {
+ public:
+ // {a0 ... a7, t0 ... t5, s0, s1, s2, s5, s7, s8}
+ static constexpr uint32_t kPushedGpRegs = 0b11010011100000111111111111110000;
+ // {f0, f1, f2, ... f27, f28}
+ static constexpr uint32_t kPushedFpRegs = 0x1fffffff;
+
+ static constexpr int kNumPushedGpRegisters =
+ base::bits::CountPopulation(kPushedGpRegs);
+ static constexpr int kNumPushedFpRegisters =
+ base::bits::CountPopulation(kPushedFpRegs);
+
+ static constexpr int kLastPushedGpRegisterOffset =
+ -kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
+ static constexpr int kLastPushedFpRegisterOffset =
+ kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kDoubleSize;
+
+ // Offsets are fp-relative.
+ static int GetPushedGpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedGpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
+ }
+
+ static int GetPushedFpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedFpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kDoubleSize;
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_
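
GetPushedGpRegisterOffset and GetPushedFpRegisterOffset in the new header compute a register's slot as the offset of the lowest pushed register plus one slot per pushed register with a smaller code, i.e. a popcount of the mask bits below reg_code. A standalone sketch of the scheme with a toy bitmask and slot size (assumed values, not the V8 constants):

    // Assumed toy values: registers 0, 1 and 3 pushed, 8-byte slots.
    #include <bitset>
    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kPushedRegsSketch = 0b1011;
    constexpr int kSlotSize = 8;
    constexpr int kLastPushedRegisterOffset = -3 * kSlotSize;  // lowest slot

    int GetPushedRegisterOffsetSketch(int reg_code) {
      assert(kPushedRegsSketch & (uint32_t{1} << reg_code));
      // Count how many pushed registers have a smaller code than reg_code.
      uint32_t lower = kPushedRegsSketch & ((uint32_t{1} << reg_code) - 1);
      return kLastPushedRegisterOffset +
             static_cast<int>(std::bitset<32>(lower).count()) * kSlotSize;
    }

    int main() {
      assert(GetPushedRegisterOffsetSketch(0) == -24);  // no lower regs pushed
      assert(GetPushedRegisterOffsetSketch(1) == -16);  // one lower reg (0)
      assert(GetPushedRegisterOffsetSketch(3) == -8);   // two lower regs (0, 1)
    }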
diff --git a/deps/v8/src/execution/loong64/simulator-loong64.cc b/deps/v8/src/execution/loong64/simulator-loong64.cc
new file mode 100644
index 0000000000..33f10304f6
--- /dev/null
+++ b/deps/v8/src/execution/loong64/simulator-loong64.cc
@@ -0,0 +1,5538 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/loong64/simulator-loong64.h"
+
+// Only build the simulator if not compiling for real LOONG64 hardware.
+#if defined(USE_SIMULATOR)
+
+#include <limits.h>
+#include <stdarg.h>
+#include <stdlib.h>
+
+#include <cmath>
+
+#include "src/base/bits.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/wrappers.h"
+#include "src/base/strings.h"
+#include "src/base/vector.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disasm.h"
+#include "src/heap/combined-heap.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
+ Simulator::GlobalMonitor::Get)
+
+// #define PRINT_SIM_LOG
+
+// Util functions.
+inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); }
+
+uint32_t get_fcsr_condition_bit(uint32_t cc) {
+ if (cc == 0) {
+ return 23;
+ } else {
+ return 24 + cc;
+ }
+}
+
+static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
+ uint64_t u0, v0, w0;
+ int64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xFFFFFFFFL;
+ u1 = u >> 32;
+ v0 = v & 0xFFFFFFFFL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xFFFFFFFFL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
+
+static uint64_t MultiplyHighUnsigned(uint64_t u, uint64_t v) {
+ uint64_t u0, v0, w0;
+ uint64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xFFFFFFFFL;
+ u1 = u >> 32;
+ v0 = v & 0xFFFFFFFFL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xFFFFFFFFL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
+
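
MultiplyHighSigned and MultiplyHighUnsigned above compute the upper 64 bits of a 128-bit product from 32-bit halves. The following standalone check (not part of the simulator) exercises the same decomposition against the compiler's native 128-bit multiply, using the GCC/Clang __int128 extension as the reference:

    // Uses the GCC/Clang __int128 extension for the reference result.
    #include <cassert>
    #include <cstdint>

    static uint64_t MulHighUnsignedSketch(uint64_t u, uint64_t v) {
      uint64_t u0 = u & 0xFFFFFFFFu, u1 = u >> 32;
      uint64_t v0 = v & 0xFFFFFFFFu, v1 = v >> 32;
      uint64_t w0 = u0 * v0;
      uint64_t t = u1 * v0 + (w0 >> 32);
      uint64_t w1 = (t & 0xFFFFFFFFu) + u0 * v1;  // middle partial products
      uint64_t w2 = t >> 32;
      return u1 * v1 + w2 + (w1 >> 32);
    }

    int main() {
      uint64_t u = 0xDEADBEEFCAFEBABEull, v = 0x0123456789ABCDEFull;
      unsigned __int128 full = static_cast<unsigned __int128>(u) * v;
      assert(MulHighUnsignedSketch(u, v) == static_cast<uint64_t>(full >> 64));
    }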
+#ifdef PRINT_SIM_LOG
+inline void printf_instr(const char* _Format, ...) {
+ va_list varList;
+ va_start(varList, _Format);
+ vprintf(_Format, varList);
+ va_end(varList);
+}
+#else
+#define printf_instr(...)
+#endif
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as base::SNPrintF is that the Windows C
+// Run-Time Library does not provide vsscanf.
+#define SScanF sscanf
+
+// The Loong64Debugger class is used by the simulator while debugging simulated
+// code.
+class Loong64Debugger {
+ public:
+ explicit Loong64Debugger(Simulator* sim) : sim_(sim) {}
+
+ void Stop(Instruction* instr);
+ void Debug();
+ // Print all registers with a nice formatting.
+ void PrintAllRegs();
+ void PrintAllRegsIncludingFPU();
+
+ private:
+ // We set the breakpoint code to 0xFFFF to easily recognize it.
+ static const Instr kBreakpointInstr = BREAK | 0xFFFF;
+ static const Instr kNopInstr = 0x0;
+
+ Simulator* sim_;
+
+ int64_t GetRegisterValue(int regnum);
+ int64_t GetFPURegisterValue(int regnum);
+ float GetFPURegisterValueFloat(int regnum);
+ double GetFPURegisterValueDouble(int regnum);
+ bool GetValue(const char* desc, int64_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool SetBreakpoint(Instruction* breakpc);
+ bool DeleteBreakpoint(Instruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void UndoBreakpoints();
+ void RedoBreakpoints();
+};
+
+inline void UNSUPPORTED() { printf("Sim: Unsupported instruction.\n"); }
+
+void Loong64Debugger::Stop(Instruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->Bits(25, 6);
+ PrintF("Simulator hit (%u)\n", code);
+ Debug();
+}
+
+int64_t Loong64Debugger::GetRegisterValue(int regnum) {
+ if (regnum == kNumSimuRegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_register(regnum);
+ }
+}
+
+int64_t Loong64Debugger::GetFPURegisterValue(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register(regnum);
+ }
+}
+
+float Loong64Debugger::GetFPURegisterValueFloat(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_float(regnum);
+ }
+}
+
+double Loong64Debugger::GetFPURegisterValueDouble(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_double(regnum);
+ }
+}
+
+bool Loong64Debugger::GetValue(const char* desc, int64_t* value) {
+ int regnum = Registers::Number(desc);
+ int fpuregnum = FPURegisters::Number(desc);
+
+ if (regnum != kInvalidRegister) {
+ *value = GetRegisterValue(regnum);
+ return true;
+ } else if (fpuregnum != kInvalidFPURegister) {
+ *value = GetFPURegisterValue(fpuregnum);
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" SCNx64, reinterpret_cast<uint64_t*>(value)) ==
+ 1;
+ } else {
+ return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+}
+
+bool Loong64Debugger::SetBreakpoint(Instruction* breakpc) {
+ // Check if a breakpoint can be set. If not return without any side-effects.
+ if (sim_->break_pc_ != nullptr) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->InstructionBits();
+ // Not setting the breakpoint instruction in the code itself. It will be set
+ // when the debugger shell continues.
+ return true;
+}
+
+bool Loong64Debugger::DeleteBreakpoint(Instruction* breakpc) {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = nullptr;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+void Loong64Debugger::UndoBreakpoints() {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+}
+
+void Loong64Debugger::RedoBreakpoints() {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+ }
+}
+
+void Loong64Debugger::PrintAllRegs() {
+#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
+
+ PrintF("\n");
+ // at, v0, a0.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 "\t%3s: 0x%016" PRIx64 " %14" PRId64
+ "\t%3s: 0x%016" PRIx64 " %14" PRId64 "\n",
+ REG_INFO(1), REG_INFO(2), REG_INFO(4));
+ // v1, a1.
+ PrintF("%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \n",
+ "", REG_INFO(3), REG_INFO(5));
+ // a2.
+ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "",
+ REG_INFO(6));
+ // a3.
+ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "",
+ REG_INFO(7));
+ PrintF("\n");
+ // a4-t3, s0-s7
+ for (int i = 0; i < 8; i++) {
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \n",
+ REG_INFO(8 + i), REG_INFO(16 + i));
+ }
+ PrintF("\n");
+ // t8, k0, LO.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n",
+ REG_INFO(24), REG_INFO(26), REG_INFO(32));
+ // t9, k1, HI.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n",
+ REG_INFO(25), REG_INFO(27), REG_INFO(33));
+ // sp, fp, gp.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n",
+ REG_INFO(29), REG_INFO(30), REG_INFO(28));
+ // pc.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \n",
+ REG_INFO(31), REG_INFO(34));
+
+#undef REG_INFO
+}
+
+void Loong64Debugger::PrintAllRegsIncludingFPU() {
+#define FPU_REG_INFO(n) \
+ FPURegisters::Name(n), GetFPURegisterValue(n), GetFPURegisterValueDouble(n)
+
+ PrintAllRegs();
+
+ PrintF("\n\n");
+ // f0, f1, f2, ... f31.
+ // TODO(plind): consider printing 2 columns for space efficiency.
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(0));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(1));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(2));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(3));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(4));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(5));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(6));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(7));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(8));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(9));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(10));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(11));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(12));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(13));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(14));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(15));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(16));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(17));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(18));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(19));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(20));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(21));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(22));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(23));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(24));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(25));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(26));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(27));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(28));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(29));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(30));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(31));
+
+#undef FPU_REG_INFO
+}
+
+void Loong64Debugger::Debug() {
+ intptr_t last_pc = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ UndoBreakpoints();
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (last_pc != sim_->get_pc()) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::base::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%016" PRIx64 " %s\n", sim_->get_pc(), buffer.begin());
+ last_pc = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->last_debugger_input();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_;
+ sim_->set_last_debugger_input(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = SScanF(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ Instruction* instr = reinterpret_cast<Instruction*>(sim_->get_pc());
+ if (!(instr->IsTrap()) ||
+ instr->InstructionBits() == rtCallRedirInstr) {
+ sim_->InstructionDecode(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ PrintF("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + kInstrSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ double dvalue;
+ if (strcmp(arg1, "all") == 0) {
+ PrintAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ PrintAllRegsIncludingFPU();
+ } else {
+ int regnum = Registers::Number(arg1);
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (regnum != kInvalidRegister) {
+ value = GetRegisterValue(regnum);
+ PrintF("%s: 0x%08" PRIx64 " %" PRId64 " \n", arg1, value,
+ value);
+ } else if (fpuregnum != kInvalidFPURegister) {
+ value = GetFPURegisterValue(fpuregnum);
+ dvalue = GetFPURegisterValueDouble(fpuregnum);
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n",
+ FPURegisters::Name(fpuregnum), value, dvalue);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ if (argc == 3) {
+ if (strcmp(arg2, "single") == 0) {
+ int64_t value;
+ float fvalue;
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (fpuregnum != kInvalidFPURegister) {
+ value = GetFPURegisterValue(fpuregnum);
+ value &= 0xFFFFFFFFUL;
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ PrintF("%s: 0x%08" PRIx64 " %11.4e\n", arg1, value, fvalue);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("print <fpu register> single\n");
+ }
+ } else {
+ PrintF("print <register> or print <fpu register> single\n");
+ }
+ }
+ } else if ((strcmp(cmd, "po") == 0) ||
+ (strcmp(cmd, "printobject") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ StdoutStream os;
+ if (GetValue(arg1, &value)) {
+ Object obj(value);
+ os << arg1 << ": \n";
+#ifdef DEBUG
+ obj.Print(os);
+ os << "\n";
+#else
+ os << Brief(obj) << "\n";
+#endif
+ } else {
+ os << arg1 << " unrecognized\n";
+ }
+ } else {
+ PrintF("printobject <value>\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
+ strcmp(cmd, "dump") == 0) {
+ int64_t* cur = nullptr;
+ int64_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(sim_->get_register(Simulator::sp));
+ } else { // Command "mem".
+ int64_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
+
+ int64_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ bool skip_obj_print = (strcmp(cmd, "dump") == 0);
+ while (cur < end) {
+ PrintF(" 0x%012" PRIxPTR " : 0x%016" PRIx64 " %14" PRId64 " ",
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
+ Object obj(*cur);
+ Heap* current_heap = sim_->isolate_->heap();
+ if (!skip_obj_print) {
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
+ PrintF(" (");
+ if (obj.IsSmi()) {
+ PrintF("smi %d", Smi::ToInt(obj));
+ } else {
+ obj.ShortPrint();
+ }
+ PrintF(")");
+ }
+ }
+ PrintF("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::base::EmbeddedVector<char, 256> buffer;
+
+ byte* cur = nullptr;
+ byte* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * kInstrSize);
+ } else if (argc == 2) {
+ int regnum = Registers::Number(arg1);
+ if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * kInstrSize);
+ }
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
+ buffer.begin());
+ cur += kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("relinquishing control to gdb\n");
+ v8::base::OS::DebugBreak();
+ PrintF("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
+ PrintF("setting breakpoint failed\n");
+ }
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!DeleteBreakpoint(nullptr)) {
+ PrintF("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ PrintF("No flags on LOONG64 !\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int64_t value;
+ intptr_t stop_pc = sim_->get_pc() - 2 * kInstrSize;
+ Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+ Instruction* msg_address =
+ reinterpret_cast<Instruction*>(stop_pc + kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->IsStopInstruction(stop_instr)) {
+ stop_instr->SetInstructionBits(kNopInstr);
+ msg_address->SetInstructionBits(kNopInstr);
+ } else {
+ PrintF("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ PrintF("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->PrintStopInfo(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->PrintStopInfo(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->EnableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->EnableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->DisableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->DisableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ PrintF("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
+ // Print registers and disassemble.
+ PrintAllRegs();
+ PrintF("\n");
+
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::base::EmbeddedVector<char, 256> buffer;
+
+ byte* cur = nullptr;
+ byte* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * kInstrSize);
+ } else if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // no length parameter passed, assume 10 instructions
+ end = cur + (10 * kInstrSize);
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
+ buffer.begin());
+ cur += kInstrSize;
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ PrintF("cont\n");
+ PrintF(" continue execution (alias 'c')\n");
+ PrintF("stepi\n");
+ PrintF(" step one instruction (alias 'si')\n");
+ PrintF("print <register>\n");
+ PrintF(" print register content (alias 'p')\n");
+ PrintF(" use register name 'all' to print all registers\n");
+ PrintF("printobject <register>\n");
+ PrintF(" print an object from a register (alias 'po')\n");
+ PrintF("stack [<words>]\n");
+ PrintF(" dump stack content, default dump 10 words)\n");
+ PrintF("mem <address> [<words>]\n");
+ PrintF(" dump memory content, default dump 10 words)\n");
+ PrintF("dump [<words>]\n");
+ PrintF(
+ " dump memory content without pretty printing JS objects, default "
+ "dump 10 words)\n");
+ PrintF("flags\n");
+ PrintF(" print flags\n");
+ PrintF("disasm [<instructions>]\n");
+ PrintF("disasm [<address/register>]\n");
+ PrintF("disasm [[<address/register>] <instructions>]\n");
+ PrintF(" disassemble code, default is 10 instructions\n");
+ PrintF(" from pc (alias 'di')\n");
+ PrintF("gdb\n");
+ PrintF(" enter gdb\n");
+ PrintF("break <address>\n");
+ PrintF(" set a break point on the address\n");
+ PrintF("del\n");
+ PrintF(" delete the breakpoint\n");
+ PrintF("stop feature:\n");
+ PrintF(" Description:\n");
+ PrintF(" Stops are debug instructions inserted by\n");
+ PrintF(" the Assembler::stop() function.\n");
+ PrintF(" When hitting a stop, the Simulator will\n");
+ PrintF(" stop and give control to the Debugger.\n");
+ PrintF(" All stop codes are watched:\n");
+ PrintF(" - They can be enabled / disabled: the Simulator\n");
+ PrintF(" will / won't stop when hitting them.\n");
+ PrintF(" - The Simulator keeps track of how many times they \n");
+ PrintF(" are met. (See the info command.) Going over a\n");
+ PrintF(" disabled stop still increases its counter. \n");
+ PrintF(" Commands:\n");
+ PrintF(" stop info all/<code> : print infos about number <code>\n");
+ PrintF(" or all stop(s).\n");
+ PrintF(" stop enable/disable all/<code> : enables / disables\n");
+ PrintF(" all or number <code> stop(s)\n");
+ PrintF(" stop unstop\n");
+ PrintF(" ignore the stop instruction at the current location\n");
+ PrintF(" from now on\n");
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ RedoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+bool Simulator::ICacheMatch(void* one, void* two) {
+ DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
+ return one == two;
+}
+
+static uint32_t ICacheHash(void* key) {
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+static bool AllOnOnePage(uintptr_t start, size_t size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void Simulator::set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+}
+
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+ instruction->SetInstructionBits(rtCallRedirInstr);
+}
+
+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
+ void* start_addr, size_t size) {
+ int64_t start = reinterpret_cast<int64_t>(start_addr);
+ int64_t intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePage(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ DCHECK_EQ((int64_t)0, start & CachePage::kPageMask);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePage(i_cache, start, size);
+ }
+}
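
FlushICache above first widens the requested range to whole cache lines: the start is snapped down to its line boundary, the length grows by the amount backed up, and the result is rounded up to a multiple of the line size before the page-by-page flush. A standalone sketch of that alignment arithmetic, with an assumed line size (the simulator takes its line geometry from CachePage):

    // Assumed value: a 32-byte cache line.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr int64_t kLineSize = 32;
    constexpr int64_t kLineMask = kLineSize - 1;

    void AlignFlushRange(int64_t* start, size_t* size) {
      int64_t intra_line = *start & kLineMask;  // offset into the first line
      *start -= intra_line;                     // snap start down to its line
      *size += intra_line;                      // grow by the amount backed up
      *size = ((*size - 1) | kLineMask) + 1;    // round up to whole lines
    }

    int main() {
      int64_t start = 0x1005;  // 5 bytes into a 32-byte line
      size_t size = 40;
      AlignFlushRange(&start, &size);
      assert(start == 0x1000 && size == 64);  // 45 bytes rounds up to 2 lines
    }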
+
+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page) {
+ base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
+ if (entry->value == nullptr) {
+ CachePage* new_page = new CachePage();
+ entry->value = new_page;
+ }
+ return reinterpret_cast<CachePage*>(entry->value);
+}
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
+ intptr_t start, size_t size) {
+ DCHECK_LE(size, CachePage::kPageSize);
+ DCHECK(AllOnOnePage(start, size - 1));
+ DCHECK_EQ(start & CachePage::kLineMask, 0);
+ DCHECK_EQ(size & CachePage::kLineMask, 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* valid_bytemap = cache_page->ValidityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr) {
+ int64_t address = reinterpret_cast<int64_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* cache_valid_byte = cache_page->ValidityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset), kInstrSize));
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
+ // Set up simulator support first. Some of this information is needed to
+ // set up the architecture state.
+ stack_size_ = FLAG_sim_stack_size * KB;
+ stack_ = reinterpret_cast<char*>(base::Malloc(stack_size_));
+ pc_modified_ = false;
+ icount_ = 0;
+ break_count_ = 0;
+ break_pc_ = nullptr;
+ break_instr_ = 0;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < kNumSimuRegisters; i++) {
+ registers_[i] = 0;
+ }
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+ for (int i = 0; i < kNumCFRegisters; i++) {
+ CFregisters_[i] = 0;
+ }
+
+ FCSR_ = 0;
+
+ // The sp is initialized to point to the bottom (high address end) of the
+ // allocated stack area. To be safe against potential stack underflows, we
+ // leave some buffer below.
+ registers_[sp] = reinterpret_cast<int64_t>(stack_) + stack_size_ - 64;
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+
+ last_debugger_input_ = nullptr;
+}
+
+Simulator::~Simulator() {
+ GlobalMonitor::Get()->RemoveLinkedAddress(&global_monitor_thread_);
+ base::Free(stack_);
+}
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+ v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+ DCHECK_NOT_NULL(isolate_data);
+
+ Simulator* sim = isolate_data->simulator();
+ if (sim == nullptr) {
+ // TODO(146): delete the simulator object when a thread/isolate goes away.
+ sim = new Simulator(isolate);
+ isolate_data->set_simulator(sim);
+ }
+ return sim;
+}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::set_register(int reg, int64_t value) {
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void Simulator::set_dw_register(int reg, const int* dbl) {
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+ registers_[reg] = dbl[1];
+ registers_[reg] = registers_[reg] << 32;
+ registers_[reg] += dbl[0];
+}
+
+void Simulator::set_fpu_register(int fpureg, int64_t value) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
+ // Set ONLY lower 32-bits, leaving upper bits untouched.
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ int32_t* pword;
+ pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+
+ *pword = value;
+}
+
+void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
+ // Set ONLY upper 32-bits, leaving lower bits untouched.
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ int32_t* phiword;
+ phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+
+ *phiword = value;
+}
+
+void Simulator::set_fpu_register_float(int fpureg, float value) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ *bit_cast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::set_fpu_register_double(int fpureg, double value) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ *bit_cast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::set_cf_register(int cfreg, bool value) {
+ DCHECK((cfreg >= 0) && (cfreg < kNumCFRegisters));
+ CFregisters_[cfreg] = value;
+}
+
+// Get the register from the architecture state. The only special case handled
+// here is the zero register, which always reads as 0.
+int64_t Simulator::get_register(int reg) const {
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+ if (reg == 0)
+ return 0;
+ else
+ return registers_[reg];
+}
+
+double Simulator::get_double_from_register_pair(int reg) {
+ // TODO(plind): bad ABI stuff, refactor or remove.
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer register_[] array
+ // into the double precision floating point value and return it.
+ char buffer[sizeof(registers_[0])];
+ memcpy(buffer, &registers_[reg], sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, sizeof(registers_[0]));
+ return (dm_val);
+}
+
+int64_t Simulator::get_fpu_register(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+int32_t Simulator::get_fpu_register_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>(FPUregisters_[fpureg] & 0xFFFFFFFF);
+}
+
+int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>(FPUregisters_[fpureg] & 0xFFFFFFFF);
+}
+
+int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>((FPUregisters_[fpureg] >> 32) & 0xFFFFFFFF);
+}
+
+float Simulator::get_fpu_register_float(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg]));
+}
+
+double Simulator::get_fpu_register_double(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return *bit_cast<double*>(&FPUregisters_[fpureg]);
+}
+
+bool Simulator::get_cf_register(int cfreg) const {
+ DCHECK((cfreg >= 0) && (cfreg < kNumCFRegisters));
+ return CFregisters_[cfreg];
+}
+
+// Runtime FP routines take up to two double arguments and zero
+// or one integer argument. All are fetched here from the simulated
+// argument registers: f0 and f1 for the doubles, a2 for the integer.
+void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
+ const int fparg2 = f1;
+ *x = get_fpu_register_double(f0);
+ *y = get_fpu_register_double(fparg2);
+ *z = static_cast<int32_t>(get_register(a2));
+}
+
+// The return value is either in v0/v1 or f0.
+void Simulator::SetFpResult(const double& result) {
+ set_fpu_register_double(0, result);
+}
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
+ if (value) {
+ FCSR_ |= (1 << cc);
+ } else {
+ FCSR_ &= ~(1 << cc);
+ }
+}
+
+bool Simulator::test_fcsr_bit(uint32_t cc) { return FCSR_ & (1 << cc); }
+
+void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) {
+ FCSR_ |= mode & kFPURoundingModeMask;
+}
+
+unsigned int Simulator::get_fcsr_rounding_mode() {
+ return FCSR_ & kFPURoundingModeMask;
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
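+// For example, original = 2^31 (finite but above INT32_MAX) sets the overflow
+// and invalid-operation cause bits and returns true, while original = 1.5
+// rounded to 2.0 only sets the inexact cause bit and returns false.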
+bool Simulator::set_fcsr_round_error(double original, double rounded) {
+ bool ret = false;
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ set_fcsr_bit(kFCSRUnderflowCauseBit, false);
+ set_fcsr_bit(kFCSROverflowCauseBit, false);
+ set_fcsr_bit(kFCSRInexactCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if (rounded > max_int32 || rounded < min_int32) {
+ set_fcsr_bit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round64_error(double original, double rounded) {
+ bool ret = false;
+ // The value of INT64_MAX (2^63-1) can't be represented as a double exactly,
+ // so the closest representable value, 2^63, is loaded into max_int64.
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ set_fcsr_bit(kFCSRUnderflowCauseBit, false);
+ set_fcsr_bit(kFCSROverflowCauseBit, false);
+ set_fcsr_bit(kFCSRInexactCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if (rounded >= max_int64 || rounded < min_int64) {
+ set_fcsr_bit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round_error(float original, float rounded) {
+ bool ret = false;
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ set_fcsr_bit(kFCSRUnderflowCauseBit, false);
+ set_fcsr_bit(kFCSROverflowCauseBit, false);
+ set_fcsr_bit(kFCSRInexactCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if (rounded > max_int32 || rounded < min_int32) {
+ set_fcsr_bit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+void Simulator::set_fpu_register_word_invalid_result(float original,
+ float rounded) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register_word(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_invalid_result(float original, float rounded) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_invalid_result64(float original,
+ float rounded) {
+ // The value of INT64_MAX (2^63-1) can't be represented as a double exactly,
+ // so the closest representable value, 2^63, is loaded into max_int64.
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded >= max_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_word_invalid_result(double original,
+ double rounded) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register_word(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_invalid_result(double original,
+ double rounded) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_invalid_result64(double original,
+ double rounded) {
+ // The value of INT64_MAX (2^63-1) can't be represented as a double exactly,
+ // so the closest representable value, 2^63, is loaded into max_int64.
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded >= max_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round64_error(float original, float rounded) {
+ bool ret = false;
+ // The value of INT64_MAX (2^63-1) can't be represented as a double exactly,
+ // so the closest representable value, 2^63, is loaded into max_int64.
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ set_fcsr_bit(kFCSRUnderflowCauseBit, false);
+ set_fcsr_bit(kFCSROverflowCauseBit, false);
+ set_fcsr_bit(kFCSRInexactCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if (rounded >= max_int64 || rounded < min_int64) {
+ set_fcsr_bit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+// For ftint instructions only
+void Simulator::round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int) {
+ // 0 RNE (round to nearest even): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down.
+ // switch ((FCSR_ >> 8) & 3) {
+ switch (FCSR_ & kFPURoundingModeMask) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.;
+ }
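+ // e.g. toRound = 2.5: floor(2.5 + 0.5) gives 3 (odd) and 3 - 2.5 == 0.5, so
+ // the result is adjusted down to 2; toRound = 3.5 stays at 4 (even).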
+ break;
+ case kRoundToZero:
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ }
+}
+
+void Simulator::round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int) {
+ // 0 RNE (round to nearest even): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down.
+ switch (FCSR_ & kFPURoundingModeMask) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.;
+ }
+ break;
+ case kRoundToZero:
+ *rounded = std::trunc(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ }
+}
+
+void Simulator::round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int) {
+ // 0 RNE (round to nearest even): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down.
+ switch (FCSR_ & kFPURoundingModeMask) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.f;
+ }
+ break;
+ case kRoundToZero:
+ *rounded = std::trunc(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ }
+}
+
+void Simulator::round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int) {
+ // 0 RNE (round to nearest even): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down.
+ switch (FCSR_ & kFPURoundingModeMask) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.f;
+ }
+ break;
+ case kRoundToZero:
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ }
+}
+
+// Raw access to the PC register.
+void Simulator::set_pc(int64_t value) {
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int64_t Simulator::get_pc() const { return registers_[pc]; }
+
+// TODO(plind): refactor this messy debug code when we do unaligned access.
+void Simulator::DieOrDebug() {
+ if ((1)) { // Flag for this was removed.
+ Loong64Debugger dbg(this);
+ dbg.Debug();
+ } else {
+ base::OS::Abort();
+ }
+}
+
+void Simulator::TraceRegWr(int64_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ union {
+ int64_t fmt_int64;
+ int32_t fmt_int32[2];
+ float fmt_float[2];
+ double fmt_double;
+ } v;
+ v.fmt_int64 = value;
+
+ switch (t) {
+ case WORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32,
+ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0]);
+ break;
+ case DWORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int64:%" PRId64
+ " uint64:%" PRIu64,
+ value, icount_, value, value);
+ break;
+ case FLOAT:
+ base::SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") flt:%e",
+ v.fmt_int64, icount_, v.fmt_float[0]);
+ break;
+ case DOUBLE:
+ base::SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") dbl:%e",
+ v.fmt_int64, icount_, v.fmt_double);
+ break;
+ case FLOAT_DOUBLE:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") flt:%e dbl:%e",
+ v.fmt_int64, icount_, v.fmt_float[0], v.fmt_double);
+ break;
+ case WORD_DWORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32 " int64:%" PRId64 " uint64:%" PRIu64,
+ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0],
+ v.fmt_int64, v.fmt_int64);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+// TODO(plind): consider making icount_ printing a flag option.
+void Simulator::TraceMemRd(int64_t addr, int64_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ union {
+ int64_t fmt_int64;
+ int32_t fmt_int32[2];
+ float fmt_float[2];
+ double fmt_double;
+ } v;
+ v.fmt_int64 = value;
+
+ switch (t) {
+ case WORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") int32:%" PRId32 " uint32:%" PRIu32,
+ v.fmt_int64, addr, icount_, v.fmt_int32[0],
+ v.fmt_int32[0]);
+ break;
+ case DWORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") int64:%" PRId64 " uint64:%" PRIu64,
+ value, addr, icount_, value, value);
+ break;
+ case FLOAT:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") flt:%e",
+ v.fmt_int64, addr, icount_, v.fmt_float[0]);
+ break;
+ case DOUBLE:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") dbl:%e",
+ v.fmt_int64, addr, icount_, v.fmt_double);
+ break;
+ case FLOAT_DOUBLE:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") flt:%e dbl:%e",
+ v.fmt_int64, addr, icount_, v.fmt_float[0],
+ v.fmt_double);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (t) {
+ case BYTE:
+ base::SNPrintF(trace_buf_,
+ " %02" PRIx8 " --> [%016" PRIx64
+ "] (%" PRId64 ")",
+ static_cast<uint8_t>(value), addr, icount_);
+ break;
+ case HALF:
+ base::SNPrintF(trace_buf_,
+ " %04" PRIx16 " --> [%016" PRIx64
+ "] (%" PRId64 ")",
+ static_cast<uint16_t>(value), addr, icount_);
+ break;
+ case WORD:
+ base::SNPrintF(trace_buf_,
+ " %08" PRIx32 " --> [%016" PRIx64 "] (%" PRId64
+ ")",
+ static_cast<uint32_t>(value), addr, icount_);
+ break;
+ case DWORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " --> [%016" PRIx64 "] (%" PRId64 " )",
+ value, addr, icount_);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+template <typename T>
+void Simulator::TraceMemRd(int64_t addr, T value) {
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (sizeof(T)) {
+ case 1:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx8 " <-- [%08" PRIx64 "] (%" PRIu64
+ ") int8:%" PRId8 " uint8:%" PRIu8,
+ static_cast<uint8_t>(value), addr, icount_,
+ static_cast<int8_t>(value), static_cast<uint8_t>(value));
+ break;
+ case 2:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx16 " <-- [%08" PRIx64 "] (%" PRIu64
+ ") int16:%" PRId16 " uint16:%" PRIu16,
+ static_cast<uint16_t>(value), addr, icount_,
+ static_cast<int16_t>(value),
+ static_cast<uint16_t>(value));
+ break;
+ case 4:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx32 " <-- [%08" PRIx64 "] (%" PRIu64
+ ") int32:%" PRId32 " uint32:%" PRIu32,
+ static_cast<uint32_t>(value), addr, icount_,
+ static_cast<int32_t>(value),
+ static_cast<uint32_t>(value));
+ break;
+ case 8:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx64 " <-- [%08" PRIx64 "] (%" PRIu64
+ ") int64:%" PRId64 " uint64:%" PRIu64,
+ static_cast<uint64_t>(value), addr, icount_,
+ static_cast<int64_t>(value),
+ static_cast<uint64_t>(value));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+template <typename T>
+void Simulator::TraceMemWr(int64_t addr, T value) {
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (sizeof(T)) {
+ case 1:
+ base::SNPrintF(trace_buf_,
+ " %02" PRIx8 " --> [%08" PRIx64 "] (%" PRIu64
+ ")",
+ static_cast<uint8_t>(value), addr, icount_);
+ break;
+ case 2:
+ base::SNPrintF(trace_buf_,
+ " %04" PRIx16 " --> [%08" PRIx64 "] (%" PRIu64 ")",
+ static_cast<uint16_t>(value), addr, icount_);
+ break;
+ case 4:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx32 " --> [%08" PRIx64 "] (%" PRIu64 ")",
+ static_cast<uint32_t>(value), addr, icount_);
+ break;
+ case 8:
+ base::SNPrintF(trace_buf_,
+ "%16" PRIx64 " --> [%08" PRIx64 "] (%" PRIu64 ")",
+ static_cast<uint64_t>(value), addr, icount_);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+// TODO(plind): sign-extend and zero-extend are not implemented properly
+// on all the ReadXX functions; I don't think reinterpret_cast does it.
+int32_t Simulator::ReadW(int64_t addr, Instruction* instr, TraceType t) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ /* if ((addr & 0x3) == 0)*/ {
+ local_monitor_.NotifyLoad();
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr), t);
+ return *ptr;
+ }
+ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ // addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ // if ((addr & 0x3) == 0) {
+ local_monitor_.NotifyLoad();
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr), WORD);
+ return *ptr;
+ // }
+ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ /*if ((addr & 0x3) == 0)*/ {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, WORD);
+ int* ptr = reinterpret_cast<int*>(addr);
+ *ptr = value;
+ return;
+ }
+ // PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ // addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+void Simulator::WriteConditionalW(int64_t addr, int32_t value,
+ Instruction* instr, int32_t rk_reg) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ if ((addr & 0x3) == 0) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ if (local_monitor_.NotifyStoreConditional(addr, TransactionSize::Word) &&
+ GlobalMonitor::Get()->NotifyStoreConditional_Locked(
+ addr, &global_monitor_thread_)) {
+ local_monitor_.NotifyStore();
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, WORD);
+ int* ptr = reinterpret_cast<int*>(addr);
+ *ptr = value;
+ set_register(rk_reg, 1);
+ } else {
+ set_register(rk_reg, 0);
+ }
+ return;
+ }
+ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+}
+
+int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ /* if ((addr & kPointerAlignmentMask) == 0)*/ {
+ local_monitor_.NotifyLoad();
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ TraceMemRd(addr, *ptr);
+ return *ptr;
+ }
+ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ // addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ /*if ((addr & kPointerAlignmentMask) == 0)*/ {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, DWORD);
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ // PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ // addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+void Simulator::WriteConditional2W(int64_t addr, int64_t value,
+ Instruction* instr, int32_t rk_reg) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ if (local_monitor_.NotifyStoreConditional(addr,
+ TransactionSize::DoubleWord) &&
+ GlobalMonitor::Get()->NotifyStoreConditional_Locked(
+ addr, &global_monitor_thread_)) {
+ local_monitor_.NotifyStore();
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, DWORD);
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ *ptr = value;
+ set_register(rk_reg, 1);
+ } else {
+ set_register(rk_reg, 0);
+ }
+ return;
+ }
+ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+}
+
+double Simulator::ReadD(int64_t addr, Instruction* instr) {
+ /*if ((addr & kDoubleAlignmentMask) == 0)*/ {
+ local_monitor_.NotifyLoad();
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+ }
+ // PrintF("Unaligned (double) read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
+ // "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // base::OS::Abort();
+ // return 0;
+}
+
+void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
+ /*if ((addr & kDoubleAlignmentMask) == 0)*/ {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ double* ptr = reinterpret_cast<double*>(addr);
+ *ptr = value;
+ return;
+ }
+ // PrintF("Unaligned (double) write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
+ // "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
+ // if ((addr & 1) == 0) {
+ local_monitor_.NotifyLoad();
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr;
+ // }
+ // PrintF("Unaligned unsigned halfword read at 0x%08" PRIx64
+ // " , pc=0x%08" V8PRIxPTR "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
+ // if ((addr & 1) == 0) {
+ local_monitor_.NotifyLoad();
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr;
+ // }
+ // PrintF("Unaligned signed halfword read at 0x%08" PRIx64
+ // " , pc=0x%08" V8PRIxPTR "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
+ // if ((addr & 1) == 0) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, HALF);
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return;
+ // }
+ // PrintF("Unaligned unsigned halfword write at 0x%08" PRIx64
+ // " , pc=0x%08" V8PRIxPTR "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
+ // if ((addr & 1) == 0) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, HALF);
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ *ptr = value;
+ return;
+ // }
+ // PrintF("Unaligned halfword write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
+ // "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+uint32_t Simulator::ReadBU(int64_t addr) {
+ local_monitor_.NotifyLoad();
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr & 0xFF;
+}
+
+int32_t Simulator::ReadB(int64_t addr) {
+ local_monitor_.NotifyLoad();
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr;
+}
+
+void Simulator::WriteB(int64_t addr, uint8_t value) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, BYTE);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+void Simulator::WriteB(int64_t addr, int8_t value) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, BYTE);
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+template <typename T>
+T Simulator::ReadMem(int64_t addr, Instruction* instr) {
+ int alignment_mask = (1 << sizeof(T)) - 1;
+ if ((addr & alignment_mask) == 0) {
+ local_monitor_.NotifyLoad();
+ T* ptr = reinterpret_cast<T*>(addr);
+ TraceMemRd(addr, *ptr);
+ return *ptr;
+ }
+ PrintF("Unaligned read of type sizeof(%ld) at 0x%08lx, pc=0x%08" V8PRIxPTR
+ "\n",
+ sizeof(T), addr, reinterpret_cast<intptr_t>(instr));
+ base::OS::Abort();
+ return 0;
+}
+
+template <typename T>
+void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) {
+ int alignment_mask = (1 << sizeof(T)) - 1;
+ if ((addr & alignment_mask) == 0) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ T* ptr = reinterpret_cast<T*>(addr);
+ *ptr = value;
+ TraceMemWr(addr, value);
+ return;
+ }
+ PrintF("Unaligned write of type sizeof(%ld) at 0x%08lx, pc=0x%08" V8PRIxPTR
+ "\n",
+ sizeof(T), addr, reinterpret_cast<intptr_t>(instr));
+ base::OS::Abort();
+}
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
+ // The simulator uses a separate JS stack. If we have exhausted the C stack,
+ // we also drop down the JS limit to reflect the exhaustion on the JS stack.
+ if (base::Stack::GetCurrentStackPosition() < c_limit) {
+ return reinterpret_cast<uintptr_t>(get_sp());
+ }
+
+ // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
+ // to prevent overrunning the stack when pushing values.
+ return reinterpret_cast<uintptr_t>(stack_) + 1024;
+}
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instruction* instr, const char* format) {
+ PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR " : %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ UNIMPLEMENTED();
+}
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls, the code in
+// runtime.cc uses the ObjectPair, which holds two pointer-sized values that
+// are returned in the v0/v1 register pair. With the code below we assume that
+// all runtime calls return two 64-bit values; if they don't, the v1 result
+// register contains a bogus value, which is fine because it is caller-saved.
+
+using SimulatorRuntimeCall = ObjectPair (*)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7,
+ int64_t arg8, int64_t arg9);
+
+// These prototypes handle the four types of FP calls.
+using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
+using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1);
+using SimulatorRuntimeFPCall = double (*)(double darg0);
+using SimulatorRuntimeFPIntCall = double (*)(double darg0, int32_t arg0);
+
+// This signature supports direct calls into an API function's native callback
+// (refer to InvocationCallback in v8.h).
+using SimulatorRuntimeDirectApiCall = void (*)(int64_t arg0);
+using SimulatorRuntimeProfilingApiCall = void (*)(int64_t arg0, void* arg1);
+
+// This signature supports direct call to accessor getter callback.
+using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1);
+using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1,
+ void* arg2);
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime. They are also used for debugging with the simulator.
+void Simulator::SoftwareInterrupt() {
+ int32_t opcode_hi15 = instr_.Bits(31, 17);
+ CHECK_EQ(opcode_hi15, 0x15);
+ uint32_t code = instr_.Bits(14, 0);
+ // First check whether this is a redirected C function call (call_rt_redirected).
+ if (instr_.InstructionBits() == rtCallRedirInstr) {
+ Redirection* redirection = Redirection::FromInstruction(instr_.instr());
+
+ int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
+
+ int64_t arg0 = get_register(a0);
+ int64_t arg1 = get_register(a1);
+ int64_t arg2 = get_register(a2);
+ int64_t arg3 = get_register(a3);
+ int64_t arg4 = get_register(a4);
+ int64_t arg5 = get_register(a5);
+ int64_t arg6 = get_register(a6);
+ int64_t arg7 = get_register(a7);
+ int64_t arg8 = stack_pointer[0];
+ int64_t arg9 = stack_pointer[1];
+ STATIC_ASSERT(kMaxCParameters == 10);
+
+ bool fp_call =
+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+
+ {
+ // With the hard floating point calling convention, double
+ // arguments are passed in FPU registers. Fetch the arguments
+ // from there and call the builtin using soft floating point
+ // convention.
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ arg0 = get_fpu_register(f0);
+ arg1 = get_fpu_register(f1);
+ arg2 = get_fpu_register(f2);
+ arg3 = get_fpu_register(f3);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ arg0 = get_fpu_register(f0);
+ arg1 = get_fpu_register(f1);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ arg0 = get_fpu_register(f0);
+ arg1 = get_fpu_register(f1);
+ arg2 = get_register(a2);
+ break;
+ default:
+ break;
+ }
+ }
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int64_t saved_ra = get_register(ra);
+
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->external_function());
+
+ // Based on CpuFeatures::IsSupported(FPU), Loong64 uses either the hardware
+ // FPU or gcc soft-float routines. The hardware FPU is simulated by this
+ // simulator. Soft-float adds an extra layer of ExternalReference abstraction
+ // to support serialization.
+ if (fp_call) {
+ double dval0, dval1; // one or two double parameters
+ int32_t ival; // zero or one integer parameters
+ int64_t iresult = 0; // integer return value
+ double dresult = 0; // double return value
+ GetFpArgs(&dval0, &dval1, &ival);
+ SimulatorRuntimeCall generic_target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Call to host function at %p with args %f, %f",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0, dval1);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ PrintF("Call to host function at %p with arg %f",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Call to host function at %p with args %f, %d",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0, ival);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(v0, static_cast<int64_t>(iresult));
+ // set_register(v1, static_cast<int64_t>(iresult >> 32));
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " \n",
+ reinterpret_cast<void*>(external), arg0);
+ }
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ target(arg0);
+ } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " \n",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ }
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ target(arg0, Redirection::ReverseRedirection(arg1));
+ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " \n",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ }
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ target(arg0, arg1);
+ } else if (redirection->type() ==
+ ExternalReference::PROFILING_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " %08" PRIx64 " \n",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
+ }
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+ target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+ } else {
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(
+ "Call to host function at %p "
+ "args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
+ " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
+ " , %08" PRIx64 " , %08" PRIx64 " \n",
+ reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
+ arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ }
+ ObjectPair result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ set_register(v0, (int64_t)(result.x));
+ set_register(v1, (int64_t)(result.y));
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08" PRIx64 " : %08" PRIx64 " \n", get_register(v1),
+ get_register(v0));
+ }
+ set_register(ra, saved_ra);
+ set_pc(get_register(ra));
+
+ } else if (code <= kMaxStopCode) {
+ if (IsWatchpoint(code)) {
+ PrintWatchpoint(code);
+ } else {
+ IncreaseStopCounter(code);
+ HandleStop(code, instr_.instr());
+ }
+ } else {
+ // All remaining break_ codes, and all traps are handled here.
+ Loong64Debugger dbg(this);
+ dbg.Debug();
+ }
+}
+
+// Stop helper functions.
+bool Simulator::IsWatchpoint(uint64_t code) {
+ return (code <= kMaxWatchpointCode);
+}
+
+void Simulator::PrintWatchpoint(uint64_t code) {
+ Loong64Debugger dbg(this);
+ ++break_count_;
+ PrintF("\n---- break %" PRId64 " marker: %3d (instr count: %8" PRId64
+ " ) ----------"
+ "----------------------------------",
+ code, break_count_, icount_);
+ dbg.PrintAllRegs(); // Print registers and continue running.
+}
+
+void Simulator::HandleStop(uint64_t code, Instruction* instr) {
+ // Stop if it is enabled, otherwise go on jumping over the stop
+ // and the message address.
+ if (IsEnabledStop(code)) {
+ Loong64Debugger dbg(this);
+ dbg.Stop(instr);
+ }
+}
+
+bool Simulator::IsStopInstruction(Instruction* instr) {
+ int32_t opcode_hi15 = instr->Bits(31, 17);
+ uint32_t code = static_cast<uint32_t>(instr->Bits(14, 0));
+ return (opcode_hi15 == 0x15) && code > kMaxWatchpointCode &&
+ code <= kMaxStopCode;
+}
+
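+// A stop is disabled when kStopDisabledBit is set in its counter; the
+// remaining bits count how many times the stop has been hit.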
+bool Simulator::IsEnabledStop(uint64_t code) {
+ DCHECK_LE(code, kMaxStopCode);
+ DCHECK_GT(code, kMaxWatchpointCode);
+ return !(watched_stops_[code].count & kStopDisabledBit);
+}
+
+void Simulator::EnableStop(uint64_t code) {
+ if (!IsEnabledStop(code)) {
+ watched_stops_[code].count &= ~kStopDisabledBit;
+ }
+}
+
+void Simulator::DisableStop(uint64_t code) {
+ if (IsEnabledStop(code)) {
+ watched_stops_[code].count |= kStopDisabledBit;
+ }
+}
+
+void Simulator::IncreaseStopCounter(uint64_t code) {
+ DCHECK_LE(code, kMaxStopCode);
+ if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
+ PrintF("Stop counter for code %" PRId64
+ " has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n",
+ code);
+ watched_stops_[code].count = 0;
+ EnableStop(code);
+ } else {
+ watched_stops_[code].count++;
+ }
+}
+
+// Print a stop status.
+void Simulator::PrintStopInfo(uint64_t code) {
+ if (code <= kMaxWatchpointCode) {
+ PrintF("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watched_stops_[code].desc) {
+ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i, \t%s\n",
+ code, code, state, count, watched_stops_[code].desc);
+ } else {
+ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i\n", code,
+ code, state, count);
+ }
+ }
+}
+
+void Simulator::SignalException(Exception e) {
+ FATAL("Error: Exception %i raised.", static_cast<int>(e));
+}
+
+template <typename T>
+static T FPAbs(T a);
+
+template <>
+double FPAbs<double>(double a) {
+ return fabs(a);
+}
+
+template <>
+float FPAbs<float>(float a) {
+ return fabsf(a);
+}
+
+template <typename T>
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) {
+ if (std::isnan(a) && std::isnan(b)) {
+ *result = a;
+ } else if (std::isnan(a)) {
+ *result = b;
+ } else if (std::isnan(b)) {
+ *result = a;
+ } else if (b == a) {
+ // Handle -0.0 == 0.0 case.
+ // std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
+ // negates the result.
+ *result = std::signbit(b) - static_cast<int>(kind) ? b : a;
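+ // e.g. (assuming kMin == 0 and kMax == 1) for a == +0.0 and b == -0.0 this
+ // picks b (-0.0) for kMin and a (+0.0) for kMax, and likewise picks the -0.0
+ // for kMin and the +0.0 for kMax when the operands are swapped.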
+ } else {
+ return false;
+ }
+ return true;
+}
+
+template <typename T>
+static T FPUMin(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
+ return result;
+ } else {
+ return b < a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMax(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) {
+ return result;
+ } else {
+ return b > a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMinA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
+ if (FPAbs(a) < FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) < FPAbs(a)) {
+ result = b;
+ } else {
+ result = a < b ? a : b;
+ }
+ }
+ return result;
+}
+
+template <typename T>
+static T FPUMaxA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
+ if (FPAbs(a) > FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) > FPAbs(a)) {
+ result = b;
+ } else {
+ result = a > b ? a : b;
+ }
+ }
+ return result;
+}
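+// FPUMinA and FPUMaxA above compare by magnitude but return the original
+// signed operand, e.g. FPUMaxA(-3.0, 2.0) is -3.0 and FPUMinA(-3.0, 2.0) is
+// 2.0.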
+
+enum class KeepSign : bool { no = false, yes };
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value,
+ int>::type = 0>
+T FPUCanonalizeNaNArg(T result, T arg, KeepSign keepSign = KeepSign::no) {
+ DCHECK(std::isnan(arg));
+ T qNaN = std::numeric_limits<T>::quiet_NaN();
+ if (keepSign == KeepSign::yes) {
+ return std::copysign(qNaN, result);
+ }
+ return qNaN;
+}
+
+template <typename T>
+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first) {
+ if (std::isnan(first)) {
+ return FPUCanonalizeNaNArg(result, first, keepSign);
+ }
+ return result;
+}
+
+template <typename T, typename... Args>
+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first, Args... args) {
+ if (std::isnan(first)) {
+ return FPUCanonalizeNaNArg(result, first, keepSign);
+ }
+ return FPUCanonalizeNaNArgs(result, keepSign, args...);
+}
+
+template <typename Func, typename T, typename... Args>
+T FPUCanonalizeOperation(Func f, T first, Args... args) {
+ return FPUCanonalizeOperation(f, KeepSign::no, first, args...);
+}
+
+template <typename Func, typename T, typename... Args>
+T FPUCanonalizeOperation(Func f, KeepSign keepSign, T first, Args... args) {
+ T result = f(first, args...);
+ if (std::isnan(result)) {
+ result = FPUCanonalizeNaNArgs(result, keepSign, first, args...);
+ }
+ return result;
+}
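+// FPUCanonalizeOperation applies f and, if the result is NaN and one of the
+// operands was NaN, replaces it with a canonical quiet NaN (optionally copying
+// the result's sign); a NaN produced from non-NaN operands, e.g. inf + (-inf),
+// is returned as computed.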
+
+// Handle execution based on instruction types.
+void Simulator::DecodeTypeOp6() {
+ int64_t alu_out;
+ // Next pc.
+ int64_t next_pc = bad_ra;
+
+ // Branch instructions common part.
+ auto BranchAndLinkHelper = [this, &next_pc]() {
+ int64_t current_pc = get_pc();
+ set_register(ra, current_pc + kInstrSize);
+ int32_t offs26_low16 =
+ static_cast<uint32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ int32_t offs26_high10 = static_cast<int32_t>(instr_.Bits(9, 0) << 22) >> 6;
+ int32_t offs26 = offs26_low16 | offs26_high10;
+ next_pc = current_pc + (offs26 << 2);
+ printf_instr("Offs26: %08x\n", offs26);
+ set_pc(next_pc);
+ };
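+ // The shift pairs in these branch helpers reassemble the split offsets: for
+ // a 26-bit offset, instruction bits 25:10 hold offset bits 15:0 (zero-filled
+ // into the low half) and bits 9:0 hold offset bits 25:16 (the arithmetic
+ // shift sign-extends them), so e.g. an all-ones encoding yields offs26 == -1,
+ // i.e. a branch target of pc - 4.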
+
+ auto BranchOff16Helper = [this, &next_pc](bool do_branch) {
+ int64_t current_pc = get_pc();
+ int32_t offs16 = static_cast<int32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ printf_instr("Offs16: %08x\n", offs16);
+ int32_t offs = do_branch ? (offs16 << 2) : kInstrSize;
+ next_pc = current_pc + offs;
+ set_pc(next_pc);
+ };
+
+ auto BranchOff21Helper = [this, &next_pc](bool do_branch) {
+ int64_t current_pc = get_pc();
+ int32_t offs21_low16 =
+ static_cast<uint32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ int32_t offs21_high5 = static_cast<int32_t>(instr_.Bits(4, 0) << 27) >> 11;
+ int32_t offs = offs21_low16 | offs21_high5;
+ printf_instr("Offs21: %08x\n", offs);
+ offs = do_branch ? (offs << 2) : kInstrSize;
+ next_pc = current_pc + offs;
+ set_pc(next_pc);
+ };
+
+ auto BranchOff26Helper = [this, &next_pc]() {
+ int64_t current_pc = get_pc();
+ int32_t offs26_low16 =
+ static_cast<uint32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ int32_t offs26_high10 = static_cast<int32_t>(instr_.Bits(9, 0) << 22) >> 6;
+ int32_t offs26 = offs26_low16 | offs26_high10;
+ next_pc = current_pc + (offs26 << 2);
+ printf_instr("Offs26: %08x\n", offs26);
+ set_pc(next_pc);
+ };
+
+ auto JumpOff16Helper = [this, &next_pc]() {
+ int32_t offs16 = static_cast<int32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ printf_instr("JIRL\t %s: %016lx, %s: %016lx, offs16: %x\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), offs16);
+ set_register(rd_reg(), get_pc() + kInstrSize);
+ next_pc = rj() + (offs16 << 2);
+ set_pc(next_pc);
+ };
+
+ switch (instr_.Bits(31, 26) << 26) {
+ case ADDU16I_D: {
+ printf_instr("ADDU16I_D\t %s: %016lx, %s: %016lx, si16: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si16());
+ int32_t si16_upper = static_cast<int32_t>(si16()) << 16;
+ alu_out = static_cast<int64_t>(si16_upper) + rj();
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BEQZ:
+ printf_instr("BEQZ\t %s: %016lx, ", Registers::Name(rj_reg()), rj());
+ BranchOff21Helper(rj() == 0);
+ break;
+ case BNEZ:
+ printf_instr("BNEZ\t %s: %016lx, ", Registers::Name(rj_reg()), rj());
+ BranchOff21Helper(rj() != 0);
+ break;
+ case BCZ: {
+ if (instr_.Bits(9, 8) == 0b00) {
+ // BCEQZ
+ printf_instr("BCEQZ\t fcc%d: %s, ", cj_reg(), cj() ? "True" : "False");
+ BranchOff21Helper(cj() == false);
+ } else if (instr_.Bits(9, 8) == 0b01) {
+ // BCNEZ
+ printf_instr("BCNEZ\t fcc%d: %s, ", cj_reg(), cj() ? "True" : "False");
+ BranchOff21Helper(cj() == true);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case JIRL:
+ JumpOff16Helper();
+ break;
+ case B:
+ printf_instr("B\t ");
+ BranchOff26Helper();
+ break;
+ case BL:
+ printf_instr("BL\t ");
+ BranchAndLinkHelper();
+ break;
+ case BEQ:
+ printf_instr("BEQ\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()),
+ rj(), Registers::Name(rd_reg()), rd());
+ BranchOff16Helper(rj() == rd());
+ break;
+ case BNE:
+ printf_instr("BNE\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()),
+ rj(), Registers::Name(rd_reg()), rd());
+ BranchOff16Helper(rj() != rd());
+ break;
+ case BLT:
+ printf_instr("BLT\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()),
+ rj(), Registers::Name(rd_reg()), rd());
+ BranchOff16Helper(rj() < rd());
+ break;
+ case BGE:
+ printf_instr("BGE\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()),
+ rj(), Registers::Name(rd_reg()), rd());
+ BranchOff16Helper(rj() >= rd());
+ break;
+ case BLTU:
+ printf_instr("BLTU\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()),
+ rj(), Registers::Name(rd_reg()), rd());
+ BranchOff16Helper(rj_u() < rd_u());
+ break;
+ case BGEU:
+ printf_instr("BGEU\t %s: %016lx, %s, %016lx, ", Registers::Name(rj_reg()),
+ rj(), Registers::Name(rd_reg()), rd());
+ BranchOff16Helper(rj_u() >= rd_u());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp7() {
+ int64_t alu_out;
+
+ switch (instr_.Bits(31, 25) << 25) {
+ case LU12I_W: {
+ printf_instr("LU12I_W\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
+ int32_t si20_upper = static_cast<int32_t>(si20() << 12);
+ SetResult(rd_reg(), static_cast<int64_t>(si20_upper));
+ break;
+ }
+ case LU32I_D: {
+ printf_instr("LU32I_D\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
+ int32_t si20_signExtend = static_cast<int32_t>(si20() << 12) >> 12;
+ int64_t lower_32bit_mask = 0xFFFFFFFF;
+ alu_out = (static_cast<int64_t>(si20_signExtend) << 32) |
+ (rd() & lower_32bit_mask);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case PCADDI: {
+ printf_instr("PCADDI\t %s: %016lx, si20: %d\n", Registers::Name(rd_reg()),
+ rd(), si20());
+ int32_t si20_signExtend = static_cast<int32_t>(si20() << 12) >> 10;
+ int64_t current_pc = get_pc();
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case PCALAU12I: {
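+      // PCALAU12I computes PC + (si20 << 12) and clears the low 12 bits, so
+      // the result is 4 KiB-aligned.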
+ printf_instr("PCALAU12I\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
+ int32_t si20_signExtend = static_cast<int32_t>(si20() << 12);
+ int64_t current_pc = get_pc();
+ int64_t clear_lower12bit_mask = 0xFFFFFFFFFFFFF000;
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ SetResult(rd_reg(), alu_out & clear_lower12bit_mask);
+ break;
+ }
+ case PCADDU12I: {
+ printf_instr("PCADDU12I\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
+ int32_t si20_signExtend = static_cast<int32_t>(si20() << 12);
+ int64_t current_pc = get_pc();
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case PCADDU18I: {
+ printf_instr("PCADDU18I\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
+ int64_t si20_signExtend = (static_cast<int64_t>(si20()) << 44) >> 26;
+ int64_t current_pc = get_pc();
+ alu_out = si20_signExtend + current_pc;
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp8() {
+ int64_t addr = 0x0;
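+  // si14 is a signed 14-bit offset in units of 4 bytes: shifting it to the
+  // top of a 64-bit word and arithmetic-shifting back sign-extends it and
+  // applies the << 2 scaling in one step.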
+ int64_t si14_se = (static_cast<int64_t>(si14()) << 50) >> 48;
+
+ switch (instr_.Bits(31, 24) << 24) {
+ case LDPTR_W:
+ printf_instr("LDPTR_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ set_register(rd_reg(), ReadW(rj() + si14_se, instr_.instr()));
+ break;
+ case STPTR_W:
+ printf_instr("STPTR_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ WriteW(rj() + si14_se, static_cast<int32_t>(rd()), instr_.instr());
+ break;
+ case LDPTR_D:
+ printf_instr("LDPTR_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ set_register(rd_reg(), Read2W(rj() + si14_se, instr_.instr()));
+ break;
+ case STPTR_D:
+ printf_instr("STPTR_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ Write2W(rj() + si14_se, rd(), instr_.instr());
+ break;
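+    // LL/SC pairs are emulated with the simulator's local and global
+    // monitors: the load-linked records a reservation, and the matching
+    // store-conditional succeeds only if no conflicting access intervened.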
+ case LL_W: {
+ printf_instr("LL_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ addr = si14_se + rj();
+ set_register(rd_reg(), ReadW(addr, instr_.instr()));
+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
+ &global_monitor_thread_);
+ break;
+ }
+ case SC_W: {
+ printf_instr("SC_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ addr = si14_se + rj();
+ WriteConditionalW(addr, static_cast<int32_t>(rd()), instr_.instr(),
+ rd_reg());
+ break;
+ }
+ case LL_D: {
+ printf_instr("LL_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ addr = si14_se + rj();
+ set_register(rd_reg(), Read2W(addr, instr_.instr()));
+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
+ &global_monitor_thread_);
+ break;
+ }
+ case SC_D: {
+ printf_instr("SC_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ addr = si14_se + rj();
+ WriteConditional2W(addr, rd(), instr_.instr(), rd_reg());
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp10() {
+ int64_t alu_out = 0x0;
+ int64_t si12_se = (static_cast<int64_t>(si12()) << 52) >> 52;
+ uint64_t si12_ze = (static_cast<uint64_t>(ui12()) << 52) >> 52;
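+  // The 12-bit immediate is used sign-extended (si12_se) by the arithmetic,
+  // compare and memory cases below, and zero-extended (si12_ze) by the
+  // logical ANDI/ORI/XORI cases.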
+
+ switch (instr_.Bits(31, 22) << 22) {
+ case BSTR_W: {
+ CHECK_EQ(instr_.Bit(21), 1);
+ uint8_t lsbw_ = lsbw();
+ uint8_t msbw_ = msbw();
+ CHECK_LE(lsbw_, msbw_);
+ uint8_t size = msbw_ - lsbw_ + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ if (instr_.Bit(15) == 0) {
+ // BSTRINS_W
+ printf_instr(
+ "BSTRINS_W\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
+ msbw_, lsbw_);
+ alu_out = static_cast<int32_t>((rd_u() & ~(mask << lsbw_)) |
+ ((rj_u() & mask) << lsbw_));
+ } else {
+ // BSTRPICK_W
+ printf_instr(
+ "BSTRPICK_W\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
+ msbw_, lsbw_);
+ alu_out = static_cast<int32_t>((rj_u() & (mask << lsbw_)) >> lsbw_);
+ }
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BSTRINS_D: {
+ uint8_t lsbd_ = lsbd();
+ uint8_t msbd_ = msbd();
+ CHECK_LE(lsbd_, msbd_);
+      printf_instr(
+          "BSTRINS_D\t %s: %016lx, %s: %016lx, msbd: %02x, lsbd: %02x\n",
+          Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
+          msbd_, lsbd_);
+ uint8_t size = msbd_ - lsbd_ + 1;
+ if (size < 64) {
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = (rd_u() & ~(mask << lsbd_)) | ((rj_u() & mask) << lsbd_);
+ SetResult(rd_reg(), alu_out);
+ } else if (size == 64) {
+ SetResult(rd_reg(), rj());
+ }
+ break;
+ }
+ case BSTRPICK_D: {
+ uint8_t lsbd_ = lsbd();
+ uint8_t msbd_ = msbd();
+ CHECK_LE(lsbd_, msbd_);
+      printf_instr(
+          "BSTRPICK_D\t %s: %016lx, %s: %016lx, msbd: %02x, lsbd: %02x\n",
+          Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
+          msbd_, lsbd_);
+ uint8_t size = msbd_ - lsbd_ + 1;
+ if (size < 64) {
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = (rj_u() & (mask << lsbd_)) >> lsbd_;
+ SetResult(rd_reg(), alu_out);
+ } else if (size == 64) {
+ SetResult(rd_reg(), rj());
+ }
+ break;
+ }
+ case SLTI:
+ printf_instr("SLTI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+ SetResult(rd_reg(), rj() < si12_se ? 1 : 0);
+ break;
+ case SLTUI:
+ printf_instr("SLTUI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+ SetResult(rd_reg(), rj_u() < static_cast<uint64_t>(si12_se) ? 1 : 0);
+ break;
+ case ADDI_W: {
+ printf_instr("ADDI_W\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+ int32_t alu32_out =
+ static_cast<int32_t>(rj()) + static_cast<int32_t>(si12_se);
+ SetResult(rd_reg(), alu32_out);
+ break;
+ }
+ case ADDI_D:
+ printf_instr("ADDI_D\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+ SetResult(rd_reg(), rj() + si12_se);
+ break;
+ case LU52I_D: {
+ printf_instr("LU52I_D\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+ int64_t si12_se = static_cast<int64_t>(si12()) << 52;
+ uint64_t mask = (1ULL << 52) - 1;
+ alu_out = si12_se + (rj() & mask);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case ANDI:
+ printf_instr("ANDI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ SetResult(rd_reg(), rj() & si12_ze);
+ break;
+ case ORI:
+ printf_instr("ORI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ SetResult(rd_reg(), rj_u() | si12_ze);
+ break;
+ case XORI:
+ printf_instr("XORI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ SetResult(rd_reg(), rj_u() ^ si12_ze);
+ break;
+ case LD_B:
+ printf_instr("LD_B\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadB(rj() + si12_se));
+ break;
+ case LD_H:
+ printf_instr("LD_H\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadH(rj() + si12_se, instr_.instr()));
+ break;
+ case LD_W:
+ printf_instr("LD_W\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadW(rj() + si12_se, instr_.instr()));
+ break;
+ case LD_D:
+ printf_instr("LD_D\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), Read2W(rj() + si12_se, instr_.instr()));
+ break;
+ case ST_B:
+ printf_instr("ST_B\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ WriteB(rj() + si12_se, static_cast<int8_t>(rd()));
+ break;
+ case ST_H:
+ printf_instr("ST_H\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ WriteH(rj() + si12_se, static_cast<int16_t>(rd()), instr_.instr());
+ break;
+ case ST_W:
+ printf_instr("ST_W\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ WriteW(rj() + si12_se, static_cast<int32_t>(rd()), instr_.instr());
+ break;
+ case ST_D:
+ printf_instr("ST_D\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ Write2W(rj() + si12_se, rd(), instr_.instr());
+ break;
+ case LD_BU:
+ printf_instr("LD_BU\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadBU(rj() + si12_se));
+ break;
+ case LD_HU:
+ printf_instr("LD_HU\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadHU(rj() + si12_se, instr_.instr()));
+ break;
+ case LD_WU:
+ printf_instr("LD_WU\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadWU(rj() + si12_se, instr_.instr()));
+ break;
+ case FLD_S: {
+ printf_instr("FLD_S\t %s: %016f, %s: %016lx, si12: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ Registers::Name(rj_reg()), rj(), si12_ze);
+ set_fpu_register(fd_reg(), kFPUInvalidResult); // Trash upper 32 bits.
+ set_fpu_register_word(
+ fd_reg(), ReadW(rj() + si12_se, instr_.instr(), FLOAT_DOUBLE));
+ break;
+ }
+ case FST_S: {
+ printf_instr("FST_S\t %s: %016f, %s: %016lx, si12: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ Registers::Name(rj_reg()), rj(), si12_ze);
+ int32_t alu_out_32 = static_cast<int32_t>(get_fpu_register(fd_reg()));
+ WriteW(rj() + si12_se, alu_out_32, instr_.instr());
+ break;
+ }
+ case FLD_D: {
+ printf_instr("FLD_D\t %s: %016f, %s: %016lx, si12: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj(), si12_ze);
+ set_fpu_register_double(fd_reg(), ReadD(rj() + si12_se, instr_.instr()));
+ TraceMemRd(rj() + si12_se, get_fpu_register(fd_reg()), DOUBLE);
+ break;
+ }
+ case FST_D: {
+ printf_instr("FST_D\t %s: %016f, %s: %016lx, si12: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj(), si12_ze);
+ WriteD(rj() + si12_se, get_fpu_register_double(fd_reg()), instr_.instr());
+ TraceMemWr(rj() + si12_se, get_fpu_register(fd_reg()), DWORD);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp12() {
+ switch (instr_.Bits(31, 20) << 20) {
+ case FMADD_S:
+ printf_instr("FMADD_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fk_reg()), fk_float(),
+ FPURegisters::Name(fa_reg()), fa_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(), std::fma(fj_float(), fk_float(), fa_float()));
+ break;
+ case FMADD_D:
+ printf_instr("FMADD_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fk_reg()), fk_double(),
+ FPURegisters::Name(fa_reg()), fa_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(),
+ std::fma(fj_double(), fk_double(), fa_double()));
+ break;
+ case FMSUB_S:
+ printf_instr("FMSUB_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fk_reg()), fk_float(),
+ FPURegisters::Name(fa_reg()), fa_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(),
+ std::fma(fj_float(), fk_float(), -fa_float()));
+ break;
+ case FMSUB_D:
+ printf_instr("FMSUB_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fk_reg()), fk_double(),
+ FPURegisters::Name(fa_reg()), fa_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(),
+ std::fma(fj_double(), fk_double(), -fa_double()));
+ break;
+ case FNMADD_S:
+ printf_instr("FNMADD_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fk_reg()), fk_float(),
+ FPURegisters::Name(fa_reg()), fa_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(),
+ std::fma(-fj_float(), fk_float(), -fa_float()));
+ break;
+ case FNMADD_D:
+ printf_instr("FNMADD_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fk_reg()), fk_double(),
+ FPURegisters::Name(fa_reg()), fa_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(),
+ std::fma(-fj_double(), fk_double(), -fa_double()));
+ break;
+ case FNMSUB_S:
+ printf_instr("FNMSUB_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fk_reg()), fk_float(),
+ FPURegisters::Name(fa_reg()), fa_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(),
+ std::fma(-fj_float(), fk_float(), fa_float()));
+ break;
+ case FNMSUB_D:
+ printf_instr("FNMSUB_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fk_reg()), fk_double(),
+ FPURegisters::Name(fa_reg()), fa_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(),
+ std::fma(-fj_double(), fk_double(), fa_double()));
+ break;
+ case FCMP_COND_S: {
+ CHECK_EQ(instr_.Bits(4, 3), 0);
+ float fj = fj_float();
+ float fk = fk_float();
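+      // The C* conditions are the quiet comparisons; the signaling S*
+      // variants are not implemented by the simulator.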
+ switch (cond()) {
+ case CAF: {
+ printf_instr("FCMP_CAF_S fcc%d\n", cd_reg());
+ set_cf_register(cd_reg(), false);
+ break;
+ }
+ case CUN: {
+ printf_instr("FCMP_CUN_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CEQ: {
+ printf_instr("FCMP_CEQ_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj == fk);
+ break;
+ }
+ case CUEQ: {
+ printf_instr("FCMP_CUEQ_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj == fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CLT: {
+ printf_instr("FCMP_CLT_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj < fk);
+ break;
+ }
+ case CULT: {
+ printf_instr("FCMP_CULT_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj < fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CLE: {
+ printf_instr("FCMP_CLE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj <= fk);
+ break;
+ }
+ case CULE: {
+ printf_instr("FCMP_CULE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj <= fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CNE: {
+ printf_instr("FCMP_CNE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), (fj < fk) || (fj > fk));
+ break;
+ }
+ case COR: {
+ printf_instr("FCMP_COR_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), !std::isnan(fj) && !std::isnan(fk));
+ break;
+ }
+ case CUNE: {
+ printf_instr("FCMP_CUNE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj != fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case SAF:
+ case SUN:
+ case SEQ:
+ case SUEQ:
+ case SLT:
+ case SULT:
+ case SLE:
+ case SULE:
+ case SNE:
+ case SOR:
+ case SUNE:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case FCMP_COND_D: {
+ CHECK_EQ(instr_.Bits(4, 3), 0);
+ double fj = fj_double();
+ double fk = fk_double();
+ switch (cond()) {
+ case CAF: {
+ printf_instr("FCMP_CAF_D fcc%d\n", cd_reg());
+ set_cf_register(cd_reg(), false);
+ break;
+ }
+ case CUN: {
+ printf_instr("FCMP_CUN_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CEQ: {
+ printf_instr("FCMP_CEQ_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj == fk);
+ break;
+ }
+ case CUEQ: {
+ printf_instr("FCMP_CUEQ_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj == fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CLT: {
+ printf_instr("FCMP_CLT_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj < fk);
+ break;
+ }
+ case CULT: {
+ printf_instr("FCMP_CULT_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj < fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CLE: {
+ printf_instr("FCMP_CLE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj <= fk);
+ break;
+ }
+ case CULE: {
+ printf_instr("FCMP_CULE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj <= fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CNE: {
+ printf_instr("FCMP_CNE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), (fj < fk) || (fj > fk));
+ break;
+ }
+ case COR: {
+ printf_instr("FCMP_COR_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), !std::isnan(fj) && !std::isnan(fk));
+ break;
+ }
+ case CUNE: {
+ printf_instr("FCMP_CUNE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj != fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case SAF:
+ case SUN:
+ case SEQ:
+ case SUEQ:
+ case SLT:
+ case SULT:
+ case SLE:
+ case SULE:
+ case SNE:
+ case SOR:
+ case SUNE:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case FSEL: {
+ CHECK_EQ(instr_.Bits(19, 18), 0);
+ printf_instr("FSEL fcc%d, %s: %016f, %s: %016f, %s: %016f\n", ca_reg(),
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ if (ca() == 0) {
+ SetFPUDoubleResult(fd_reg(), fj_double());
+ } else {
+ SetFPUDoubleResult(fd_reg(), fk_double());
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp14() {
+ int64_t alu_out = 0x0;
+ int32_t alu32_out = 0x0;
+
+ switch (instr_.Bits(31, 18) << 18) {
+ case ALSL: {
+ uint8_t sa = sa2() + 1;
+ alu32_out =
+ (static_cast<int32_t>(rj()) << sa) + static_cast<int32_t>(rk());
+ if (instr_.Bit(17) == 0) {
+ // ALSL_W
+ printf_instr("ALSL_W\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa2());
+ SetResult(rd_reg(), alu32_out);
+ } else {
+ // ALSL_WU
+ printf_instr("ALSL_WU\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa2());
+ SetResult(rd_reg(), static_cast<uint32_t>(alu32_out));
+ }
+ break;
+ }
+ case BYTEPICK_W: {
+ CHECK_EQ(instr_.Bit(17), 0);
+ printf_instr("BYTEPICK_W\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa2());
+ uint8_t sa = sa2() * 8;
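+      // For sa != 0 this computes (rk << sa) | (rj >> (32 - sa)) on the low
+      // 32 bits, i.e. it picks a word out of the byte-wise concatenation of
+      // rk (high part) and rj (low part); sa == 0 simply returns rk.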
+ if (sa == 0) {
+ alu32_out = static_cast<int32_t>(rk());
+ } else {
+ int32_t mask = (1 << 31) >> (sa - 1);
+ int32_t rk_hi = (static_cast<int32_t>(rk()) & (~mask)) << sa;
+ int32_t rj_lo = (static_cast<uint32_t>(rj()) & mask) >> (32 - sa);
+ alu32_out = rk_hi | rj_lo;
+ }
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case BYTEPICK_D: {
+ printf_instr("BYTEPICK_D\t %s: %016lx, %s: %016lx, %s: %016lx, sa3: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa3());
+ uint8_t sa = sa3() * 8;
+ if (sa == 0) {
+ alu_out = rk();
+ } else {
+ int64_t mask = (1LL << 63) >> (sa - 1);
+ int64_t rk_hi = (rk() & (~mask)) << sa;
+ int64_t rj_lo = static_cast<uint64_t>(rj() & mask) >> (64 - sa);
+ alu_out = rk_hi | rj_lo;
+ }
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case ALSL_D: {
+ printf_instr("ALSL_D\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa2());
+ CHECK_EQ(instr_.Bit(17), 0);
+ uint8_t sa = sa2() + 1;
+ alu_out = (rj() << sa) + rk();
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case SLLI: {
+ DCHECK_EQ(instr_.Bit(17), 0);
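+      // Bits 17:15 select the operand width: 0b001 is the 32-bit form with a
+      // 5-bit shift amount (ui5), 0b01x the 64-bit form with a 6-bit amount
+      // (ui6).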
+ if (instr_.Bits(17, 15) == 0b001) {
+ // SLLI_W
+ printf_instr("SLLI_W\t %s: %016lx, %s: %016lx, ui5: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui5());
+ alu32_out = static_cast<int32_t>(rj()) << ui5();
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ } else if ((instr_.Bits(17, 16) == 0b01)) {
+ // SLLI_D
+ printf_instr("SLLI_D\t %s: %016lx, %s: %016lx, ui6: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui6());
+ SetResult(rd_reg(), rj() << ui6());
+ }
+ break;
+ }
+ case SRLI: {
+ DCHECK_EQ(instr_.Bit(17), 0);
+ if (instr_.Bits(17, 15) == 0b001) {
+ // SRLI_W
+ printf_instr("SRLI_W\t %s: %016lx, %s: %016lx, ui5: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui5());
+ alu32_out = static_cast<uint32_t>(rj()) >> ui5();
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ } else if (instr_.Bits(17, 16) == 0b01) {
+ // SRLI_D
+ printf_instr("SRLI_D\t %s: %016lx, %s: %016lx, ui6: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui6());
+ SetResult(rd_reg(), rj_u() >> ui6());
+ }
+ break;
+ }
+ case SRAI: {
+ DCHECK_EQ(instr_.Bit(17), 0);
+ if (instr_.Bits(17, 15) == 0b001) {
+ // SRAI_W
+ printf_instr("SRAI_W\t %s: %016lx, %s: %016lx, ui5: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui5());
+ alu32_out = static_cast<int32_t>(rj()) >> ui5();
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ } else if (instr_.Bits(17, 16) == 0b01) {
+ // SRAI_D
+ printf_instr("SRAI_D\t %s: %016lx, %s: %016lx, ui6: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui6());
+ SetResult(rd_reg(), rj() >> ui6());
+ }
+ break;
+ }
+ case ROTRI: {
+ DCHECK_EQ(instr_.Bit(17), 0);
+ if (instr_.Bits(17, 15) == 0b001) {
+ // ROTRI_W
+ printf_instr("ROTRI_W\t %s: %016lx, %s: %016lx, ui5: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui5());
+ alu32_out = static_cast<int32_t>(
+ base::bits::RotateRight32(static_cast<const uint32_t>(rj_u()),
+ static_cast<const uint32_t>(ui5())));
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ } else if (instr_.Bits(17, 16) == 0b01) {
+ // ROTRI_D
+ printf_instr("ROTRI_D\t %s: %016lx, %s: %016lx, ui6: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui6());
+ alu_out =
+ static_cast<int64_t>(base::bits::RotateRight64(rj_u(), ui6()));
+ SetResult(rd_reg(), alu_out);
+ printf_instr("ROTRI, %s, %s, %d\n", Registers::Name(rd_reg()),
+ Registers::Name(rj_reg()), ui6());
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp17() {
+ int64_t alu_out;
+
+ switch (instr_.Bits(31, 15) << 15) {
+ case ADD_W: {
+ printf_instr("ADD_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t alu32_out = static_cast<int32_t>(rj() + rk());
+ // Sign-extend result of 32bit operation into 64bit register.
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case ADD_D:
+ printf_instr("ADD_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() + rk());
+ break;
+ case SUB_W: {
+ printf_instr("SUB_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t alu32_out = static_cast<int32_t>(rj() - rk());
+ // Sign-extend result of 32bit operation into 64bit register.
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case SUB_D:
+ printf_instr("SUB_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() - rk());
+ break;
+ case SLT:
+ printf_instr("SLT\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() < rk() ? 1 : 0);
+ break;
+ case SLTU:
+ printf_instr("SLTU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj_u() < rk_u() ? 1 : 0);
+ break;
+ case MASKEQZ:
+ printf_instr("MASKEQZ\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rk() == 0 ? rj() : 0);
+ break;
+ case MASKNEZ:
+ printf_instr("MASKNEZ\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rk() != 0 ? rj() : 0);
+ break;
+ case NOR:
+ printf_instr("NOR\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), ~(rj() | rk()));
+ break;
+ case AND:
+ printf_instr("AND\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() & rk());
+ break;
+ case OR:
+ printf_instr("OR\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() | rk());
+ break;
+ case XOR:
+ printf_instr("XOR\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() ^ rk());
+ break;
+ case ORN:
+ printf_instr("ORN\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() | (~rk()));
+ break;
+ case ANDN:
+ printf_instr("ANDN\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() & (~rk()));
+ break;
+ case SLL_W:
+ printf_instr("SLL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), (int32_t)rj() << (rk_u() % 32));
+ break;
+ case SRL_W: {
+ printf_instr("SRL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ alu_out = static_cast<int32_t>((uint32_t)rj_u() >> (rk_u() % 32));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case SRA_W:
+ printf_instr("SRA_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), (int32_t)rj() >> (rk_u() % 32));
+ break;
+ case SLL_D:
+ printf_instr("SLL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() << (rk_u() % 64));
+ break;
+ case SRL_D: {
+ printf_instr("SRL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ alu_out = static_cast<int64_t>(rj_u() >> (rk_u() % 64));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case SRA_D:
+ printf_instr("SRA_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() >> (rk_u() % 64));
+ break;
+ case ROTR_W: {
+ printf_instr("ROTR_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ alu_out = static_cast<int32_t>(
+ base::bits::RotateRight32(static_cast<const uint32_t>(rj_u()),
+ static_cast<const uint32_t>(rk_u() % 32)));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case ROTR_D: {
+ printf_instr("ROTR_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ alu_out = static_cast<int64_t>(
+ base::bits::RotateRight64((rj_u()), (rk_u() % 64)));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case MUL_W: {
+ printf_instr("MUL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ alu_out = static_cast<int32_t>(rj()) * static_cast<int32_t>(rk());
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case MULH_W: {
+ printf_instr("MULH_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t rj_lo = static_cast<int32_t>(rj());
+ int32_t rk_lo = static_cast<int32_t>(rk());
+ alu_out = static_cast<int64_t>(rj_lo) * static_cast<int64_t>(rk_lo);
+ SetResult(rd_reg(), alu_out >> 32);
+ break;
+ }
+ case MULH_WU: {
+ printf_instr("MULH_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ uint32_t rj_lo = static_cast<uint32_t>(rj_u());
+ uint32_t rk_lo = static_cast<uint32_t>(rk_u());
+ alu_out = static_cast<uint64_t>(rj_lo) * static_cast<uint64_t>(rk_lo);
+ SetResult(rd_reg(), alu_out >> 32);
+ break;
+ }
+ case MUL_D:
+ printf_instr("MUL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() * rk());
+ break;
+ case MULH_D:
+ printf_instr("MULH_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), MultiplyHighSigned(rj(), rk()));
+ break;
+ case MULH_DU:
+ printf_instr("MULH_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), MultiplyHighUnsigned(rj_u(), rk_u()));
+ break;
+ case MULW_D_W: {
+ printf_instr("MULW_D_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int64_t rj_i32 = static_cast<int32_t>(rj());
+ int64_t rk_i32 = static_cast<int32_t>(rk());
+ SetResult(rd_reg(), rj_i32 * rk_i32);
+ break;
+ }
+ case MULW_D_WU: {
+ printf_instr("MULW_D_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ uint64_t rj_u32 = static_cast<uint32_t>(rj_u());
+ uint64_t rk_u32 = static_cast<uint32_t>(rk_u());
+ SetResult(rd_reg(), rj_u32 * rk_u32);
+ break;
+ }
+ case DIV_W: {
+ printf_instr("DIV_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t rj_i32 = static_cast<int32_t>(rj());
+ int32_t rk_i32 = static_cast<int32_t>(rk());
+ if (rj_i32 == INT_MIN && rk_i32 == -1) {
+ SetResult(rd_reg(), INT_MIN);
+ } else if (rk_i32 != 0) {
+ SetResult(rd_reg(), rj_i32 / rk_i32);
+ }
+ break;
+ }
+ case MOD_W: {
+ printf_instr("MOD_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t rj_i32 = static_cast<int32_t>(rj());
+ int32_t rk_i32 = static_cast<int32_t>(rk());
+ if (rj_i32 == INT_MIN && rk_i32 == -1) {
+ SetResult(rd_reg(), 0);
+ } else if (rk_i32 != 0) {
+ SetResult(rd_reg(), rj_i32 % rk_i32);
+ }
+ break;
+ }
+ case DIV_WU: {
+ printf_instr("DIV_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ uint32_t rj_u32 = static_cast<uint32_t>(rj());
+ uint32_t rk_u32 = static_cast<uint32_t>(rk());
+ if (rk_u32 != 0) {
+ SetResult(rd_reg(), static_cast<int32_t>(rj_u32 / rk_u32));
+ }
+ break;
+ }
+ case MOD_WU: {
+ printf_instr("MOD_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ uint32_t rj_u32 = static_cast<uint32_t>(rj());
+ uint32_t rk_u32 = static_cast<uint32_t>(rk());
+ if (rk_u32 != 0) {
+ SetResult(rd_reg(), static_cast<int32_t>(rj_u32 % rk_u32));
+ }
+ break;
+ }
+ case DIV_D: {
+ printf_instr("DIV_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ if (rj() == LONG_MIN && rk() == -1) {
+ SetResult(rd_reg(), LONG_MIN);
+ } else if (rk() != 0) {
+ SetResult(rd_reg(), rj() / rk());
+ }
+ break;
+ }
+ case MOD_D: {
+ printf_instr("MOD_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ if (rj() == LONG_MIN && rk() == -1) {
+ SetResult(rd_reg(), 0);
+ } else if (rk() != 0) {
+ SetResult(rd_reg(), rj() % rk());
+ }
+ break;
+ }
+ case DIV_DU: {
+ printf_instr("DIV_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ if (rk_u() != 0) {
+ SetResult(rd_reg(), static_cast<int64_t>(rj_u() / rk_u()));
+ }
+ break;
+ }
+ case MOD_DU: {
+ printf_instr("MOD_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ if (rk_u() != 0) {
+ SetResult(rd_reg(), static_cast<int64_t>(rj_u() % rk_u()));
+ }
+ break;
+ }
+ case BREAK:
+ printf_instr("BREAK\t code: %x\n", instr_.Bits(14, 0));
+ SoftwareInterrupt();
+ break;
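+    // The FP arithmetic cases below route through FPUCanonalizeOperation
+    // (defined earlier in this file), which re-canonicalizes a NaN result
+    // via FPUCanonalizeNaNArgs.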
+ case FADD_S: {
+ printf_instr("FADD_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; },
+ fj_float(), fk_float()));
+ break;
+ }
+ case FADD_D: {
+ printf_instr("FADD_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs + rhs; },
+ fj_double(), fk_double()));
+ break;
+ }
+ case FSUB_S: {
+ printf_instr("FSUB_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; },
+ fj_float(), fk_float()));
+ break;
+ }
+ case FSUB_D: {
+ printf_instr("FSUB_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs - rhs; },
+ fj_double(), fk_double()));
+ break;
+ }
+ case FMUL_S: {
+ printf_instr("FMUL_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; },
+ fj_float(), fk_float()));
+ break;
+ }
+ case FMUL_D: {
+ printf_instr("FMUL_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs * rhs; },
+ fj_double(), fk_double()));
+ break;
+ }
+ case FDIV_S: {
+ printf_instr("FDIV_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; },
+ fj_float(), fk_float()));
+ break;
+ }
+ case FDIV_D: {
+ printf_instr("FDIV_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs / rhs; },
+ fj_double(), fk_double()));
+ break;
+ }
+ case FMAX_S:
+ printf_instr("FMAX_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(fd_reg(), FPUMax(fk_float(), fj_float()));
+ break;
+ case FMAX_D:
+ printf_instr("FMAX_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(), FPUMax(fk_double(), fj_double()));
+ break;
+ case FMIN_S:
+ printf_instr("FMIN_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(fd_reg(), FPUMin(fk_float(), fj_float()));
+ break;
+ case FMIN_D:
+ printf_instr("FMIN_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(), FPUMin(fk_double(), fj_double()));
+ break;
+ case FMAXA_S:
+ printf_instr("FMAXA_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(fd_reg(), FPUMaxA(fk_float(), fj_float()));
+ break;
+ case FMAXA_D:
+ printf_instr("FMAXA_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(), FPUMaxA(fk_double(), fj_double()));
+ break;
+ case FMINA_S:
+ printf_instr("FMINA_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(fd_reg(), FPUMinA(fk_float(), fj_float()));
+ break;
+ case FMINA_D:
+ printf_instr("FMINA_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(), FPUMinA(fk_double(), fj_double()));
+ break;
+ case LDX_B:
+ printf_instr("LDX_B\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadB(rj() + rk()));
+ break;
+ case LDX_H:
+ printf_instr("LDX_H\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadH(rj() + rk(), instr_.instr()));
+ break;
+ case LDX_W:
+ printf_instr("LDX_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadW(rj() + rk(), instr_.instr()));
+ break;
+ case LDX_D:
+ printf_instr("LDX_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), Read2W(rj() + rk(), instr_.instr()));
+ break;
+ case STX_B:
+ printf_instr("STX_B\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ WriteB(rj() + rk(), static_cast<int8_t>(rd()));
+ break;
+ case STX_H:
+ printf_instr("STX_H\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ WriteH(rj() + rk(), static_cast<int16_t>(rd()), instr_.instr());
+ break;
+ case STX_W:
+ printf_instr("STX_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ WriteW(rj() + rk(), static_cast<int32_t>(rd()), instr_.instr());
+ break;
+ case STX_D:
+ printf_instr("STX_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ Write2W(rj() + rk(), rd(), instr_.instr());
+ break;
+ case LDX_BU:
+ printf_instr("LDX_BU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadBU(rj() + rk()));
+ break;
+ case LDX_HU:
+ printf_instr("LDX_HU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadHU(rj() + rk(), instr_.instr()));
+ break;
+ case LDX_WU:
+ printf_instr("LDX_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadWU(rj() + rk(), instr_.instr()));
+ break;
+ case FLDX_S:
+ printf_instr("FLDX_S\t %s: %016f, %s: %016lx, %s: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()),
+ rk());
+ set_fpu_register(fd_reg(), kFPUInvalidResult); // Trash upper 32 bits.
+ set_fpu_register_word(fd_reg(),
+ ReadW(rj() + rk(), instr_.instr(), FLOAT_DOUBLE));
+ break;
+ case FLDX_D:
+ printf_instr("FLDX_D\t %s: %016f, %s: %016lx, %s: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()),
+ rk());
+ set_fpu_register_double(fd_reg(), ReadD(rj() + rk(), instr_.instr()));
+ break;
+ case FSTX_S:
+ printf_instr("FSTX_S\t %s: %016f, %s: %016lx, %s: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()),
+ rk());
+ WriteW(rj() + rk(), static_cast<int32_t>(get_fpu_register(fd_reg())),
+ instr_.instr());
+ break;
+ case FSTX_D:
+ printf_instr("FSTX_D\t %s: %016f, %s: %016lx, %s: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()),
+ rk());
+ WriteD(rj() + rk(), get_fpu_register_double(fd_reg()), instr_.instr());
+ break;
+ case AMSWAP_W:
+ printf("Sim UNIMPLEMENTED: AMSWAP_W\n");
+ UNIMPLEMENTED();
+ case AMSWAP_D:
+ printf("Sim UNIMPLEMENTED: AMSWAP_D\n");
+ UNIMPLEMENTED();
+ case AMADD_W:
+ printf("Sim UNIMPLEMENTED: AMADD_W\n");
+ UNIMPLEMENTED();
+ case AMADD_D:
+ printf("Sim UNIMPLEMENTED: AMADD_D\n");
+ UNIMPLEMENTED();
+ case AMAND_W:
+ printf("Sim UNIMPLEMENTED: AMAND_W\n");
+ UNIMPLEMENTED();
+ case AMAND_D:
+ printf("Sim UNIMPLEMENTED: AMAND_D\n");
+ UNIMPLEMENTED();
+ case AMOR_W:
+ printf("Sim UNIMPLEMENTED: AMOR_W\n");
+ UNIMPLEMENTED();
+ case AMOR_D:
+ printf("Sim UNIMPLEMENTED: AMOR_D\n");
+ UNIMPLEMENTED();
+ case AMXOR_W:
+ printf("Sim UNIMPLEMENTED: AMXOR_W\n");
+ UNIMPLEMENTED();
+ case AMXOR_D:
+ printf("Sim UNIMPLEMENTED: AMXOR_D\n");
+ UNIMPLEMENTED();
+ case AMMAX_W:
+ printf("Sim UNIMPLEMENTED: AMMAX_W\n");
+ UNIMPLEMENTED();
+ case AMMAX_D:
+ printf("Sim UNIMPLEMENTED: AMMAX_D\n");
+ UNIMPLEMENTED();
+ case AMMIN_W:
+ printf("Sim UNIMPLEMENTED: AMMIN_W\n");
+ UNIMPLEMENTED();
+ case AMMIN_D:
+ printf("Sim UNIMPLEMENTED: AMMIN_D\n");
+ UNIMPLEMENTED();
+ case AMMAX_WU:
+ printf("Sim UNIMPLEMENTED: AMMAX_WU\n");
+ UNIMPLEMENTED();
+ case AMMAX_DU:
+ printf("Sim UNIMPLEMENTED: AMMAX_DU\n");
+ UNIMPLEMENTED();
+ case AMMIN_WU:
+ printf("Sim UNIMPLEMENTED: AMMIN_WU\n");
+ UNIMPLEMENTED();
+ case AMMIN_DU:
+ printf("Sim UNIMPLEMENTED: AMMIN_DU\n");
+ UNIMPLEMENTED();
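+    // The AM*_DB read-modify-write cases are emulated with an LL/SC retry
+    // loop: read the old value under the monitor, attempt a
+    // store-conditional of the updated value, and repeat until it succeeds.
+    // rd finally receives the value originally read from memory.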
+ case AMSWAP_DB_W: {
+ printf_instr("AMSWAP_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(), static_cast<int32_t>(rk()), instr_.instr(),
+ rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMSWAP_DB_D: {
+ printf_instr("AMSWAP_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMADD_DB_W: {
+ printf_instr("AMADD_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(),
+ static_cast<int32_t>(static_cast<int32_t>(rk()) +
+ static_cast<int32_t>(rd())),
+ instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMADD_DB_D: {
+ printf_instr("AMADD_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk() + rd(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMAND_DB_W: {
+ printf_instr("AMAND_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(),
+ static_cast<int32_t>(static_cast<int32_t>(rk()) &
+ static_cast<int32_t>(rd())),
+ instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMAND_DB_D: {
+ printf_instr("AMAND_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk() & rd(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMOR_DB_W: {
+ printf_instr("AMOR_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(),
+ static_cast<int32_t>(static_cast<int32_t>(rk()) |
+ static_cast<int32_t>(rd())),
+ instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMOR_DB_D: {
+ printf_instr("AMOR_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk() | rd(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMXOR_DB_W: {
+ printf_instr("AMXOR_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(),
+ static_cast<int32_t>(static_cast<int32_t>(rk()) ^
+ static_cast<int32_t>(rd())),
+ instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMXOR_DB_D: {
+ printf_instr("AMXOR_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk() ^ rd(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMMAX_DB_W:
+ printf("Sim UNIMPLEMENTED: AMMAX_DB_W\n");
+ UNIMPLEMENTED();
+ case AMMAX_DB_D:
+ printf("Sim UNIMPLEMENTED: AMMAX_DB_D\n");
+ UNIMPLEMENTED();
+ case AMMIN_DB_W:
+ printf("Sim UNIMPLEMENTED: AMMIN_DB_W\n");
+ UNIMPLEMENTED();
+ case AMMIN_DB_D:
+ printf("Sim UNIMPLEMENTED: AMMIN_DB_D\n");
+ UNIMPLEMENTED();
+ case AMMAX_DB_WU:
+ printf("Sim UNIMPLEMENTED: AMMAX_DB_WU\n");
+ UNIMPLEMENTED();
+ case AMMAX_DB_DU:
+ printf("Sim UNIMPLEMENTED: AMMAX_DB_DU\n");
+ UNIMPLEMENTED();
+ case AMMIN_DB_WU:
+ printf("Sim UNIMPLEMENTED: AMMIN_DB_WU\n");
+ UNIMPLEMENTED();
+ case AMMIN_DB_DU:
+ printf("Sim UNIMPLEMENTED: AMMIN_DB_DU\n");
+ UNIMPLEMENTED();
+ case DBAR:
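+      // Data barrier. Nothing to model here: the simulator performs memory
+      // accesses in program order.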
+ printf_instr("DBAR\n");
+ break;
+ case IBAR:
+ printf("Sim UNIMPLEMENTED: IBAR\n");
+ UNIMPLEMENTED();
+ case FSCALEB_S:
+ printf("Sim UNIMPLEMENTED: FSCALEB_S\n");
+ UNIMPLEMENTED();
+ case FSCALEB_D:
+ printf("Sim UNIMPLEMENTED: FSCALEB_D\n");
+ UNIMPLEMENTED();
+ case FCOPYSIGN_S:
+ printf("Sim UNIMPLEMENTED: FCOPYSIGN_S\n");
+ UNIMPLEMENTED();
+ case FCOPYSIGN_D:
+ printf("Sim UNIMPLEMENTED: FCOPYSIGN_D\n");
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp22() {
+ int64_t alu_out;
+
+ switch (instr_.Bits(31, 10) << 10) {
+ case CLZ_W: {
+ printf_instr("CLZ_W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = base::bits::CountLeadingZeros32(static_cast<int32_t>(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case CTZ_W: {
+ printf_instr("CTZ_W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = base::bits::CountTrailingZeros32(static_cast<int32_t>(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case CLZ_D: {
+ printf_instr("CLZ_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = base::bits::CountLeadingZeros64(static_cast<int64_t>(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case CTZ_D: {
+ printf_instr("CTZ_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = base::bits::CountTrailingZeros64(static_cast<int64_t>(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVB_2H: {
+ printf_instr("REVB_2H\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint32_t input = static_cast<uint32_t>(rj());
+ uint64_t output = 0;
+
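+      // Swap the two bytes within each 16-bit halfword of the low 32-bit word.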
+ uint32_t mask = 0xFF000000;
+ for (int i = 0; i < 4; i++) {
+ uint32_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 8;
+ } else {
+ tmp = tmp << 8;
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVB_4H: {
+ printf_instr("REVB_4H\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
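+      // Swap the two bytes within each of the four 16-bit halfwords.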
+ uint64_t mask = 0xFF00000000000000;
+ for (int i = 0; i < 8; i++) {
+ uint64_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 8;
+ } else {
+ tmp = tmp << 8;
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVB_2W: {
+ printf_instr("REVB_2W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
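+      // Reverse the byte order within each of the two 32-bit words.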
+ uint64_t mask = 0xFF000000FF000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 1) {
+ tmp = tmp >> (24 - i * 16);
+ } else {
+ tmp = tmp << (i * 16 - 24);
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVB_D: {
+ printf_instr("REVB_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
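+      // Reverse the byte order of the full 64-bit doubleword.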
+ uint64_t mask = 0xFF00000000000000;
+ for (int i = 0; i < 8; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 3) {
+ tmp = tmp >> (56 - i * 16);
+ } else {
+ tmp = tmp << (i * 16 - 56);
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVH_2W: {
+ printf_instr("REVH_2W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
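+      // Swap the two 16-bit halfwords within each 32-bit word.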
+ uint64_t mask = 0xFFFF000000000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 16;
+ } else {
+ tmp = tmp << 16;
+ }
+ output = output | tmp;
+ mask = mask >> 16;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVH_D: {
+ printf_instr("REVH_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
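+      // Reverse the 16-bit halfword order of the full 64-bit doubleword.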
+ uint64_t mask = 0xFFFF000000000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 1) {
+ tmp = tmp >> (48 - i * 32);
+ } else {
+ tmp = tmp << (i * 32 - 48);
+ }
+ output = output | tmp;
+ mask = mask >> 16;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BITREV_4B: {
+ printf_instr("BITREV_4B\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint32_t input = static_cast<uint32_t>(rj());
+ uint32_t output = 0;
+ uint8_t i_byte, o_byte;
+
+      // Reverse the bits within each individual byte.
+ for (int i = 0; i < 4; i++) {
+ output = output >> 8;
+ i_byte = input & 0xFF;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
+ o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+        output = output | (static_cast<uint32_t>(o_byte) << 24);
+ input = input >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BITREV_8B: {
+ printf_instr("BITREV_8B\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+ uint8_t i_byte, o_byte;
+
+      // Reverse the bits within each individual byte.
+ for (int i = 0; i < 8; i++) {
+ output = output >> 8;
+ i_byte = input & 0xFF;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
+ o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | (static_cast<uint64_t>(o_byte) << 56);
+ input = input >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BITREV_W: {
+ printf_instr("BITREV_W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint32_t input = static_cast<uint32_t>(rj());
+ uint32_t output = 0;
+ output = base::bits::ReverseBits(input);
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BITREV_D: {
+ printf_instr("BITREV_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = static_cast<int64_t>(base::bits::ReverseBits(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case EXT_W_B: {
+ printf_instr("EXT_W_B\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint8_t input = static_cast<uint8_t>(rj());
+ alu_out = static_cast<int64_t>(static_cast<int8_t>(input));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case EXT_W_H: {
+ printf_instr("EXT_W_H\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint16_t input = static_cast<uint16_t>(rj());
+ alu_out = static_cast<int64_t>(static_cast<int16_t>(input));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case FABS_S:
+ printf_instr("FABS_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(), std::abs(fj_float()));
+ break;
+ case FABS_D:
+ printf_instr("FABS_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(), std::abs(fj_double()));
+ break;
+ case FNEG_S:
+ printf_instr("FNEG_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(), -fj_float());
+ break;
+ case FNEG_D:
+ printf_instr("FNEG_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(), -fj_double());
+ break;
+ case FSQRT_S: {
+ printf_instr("FSQRT_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ if (fj_float() >= 0) {
+ SetFPUFloatResult(fd_reg(), std::sqrt(fj_float()));
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ } else {
+ SetFPUFloatResult(fd_reg(), std::sqrt(-1)); // qnan
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ }
+ break;
+ }
+ case FSQRT_D: {
+ printf_instr("FSQRT_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ if (fj_double() >= 0) {
+ SetFPUDoubleResult(fd_reg(), std::sqrt(fj_double()));
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ } else {
+ SetFPUDoubleResult(fd_reg(), std::sqrt(-1)); // qnan
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ }
+ break;
+ }
+ case FMOV_S:
+ printf_instr("FMOV_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(), fj_float());
+ break;
+ case FMOV_D:
+ printf_instr("FMOV_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUDoubleResult(fd_reg(), fj_double());
+ break;
+ case MOVGR2FR_W: {
+ printf_instr("MOVGR2FR_W\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj());
+ set_fpu_register_word(fd_reg(), static_cast<int32_t>(rj()));
+ TraceRegWr(get_fpu_register(fd_reg()), FLOAT_DOUBLE);
+ break;
+ }
+ case MOVGR2FR_D:
+ printf_instr("MOVGR2FR_D\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj());
+ SetFPUResult2(fd_reg(), rj());
+ break;
+ case MOVGR2FRH_W: {
+ printf_instr("MOVGR2FRH_W\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj());
+ set_fpu_register_hi_word(fd_reg(), static_cast<int32_t>(rj()));
+ TraceRegWr(get_fpu_register(fd_reg()), DOUBLE);
+ break;
+ }
+ case MOVFR2GR_S: {
+ printf_instr("MOVFR2GR_S\t %s: %016lx, %s, %016f\n",
+ Registers::Name(rd_reg()), rd(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ set_register(rd_reg(),
+ static_cast<int64_t>(get_fpu_register_word(fj_reg())));
+ TraceRegWr(get_register(rd_reg()), WORD_DWORD);
+ break;
+ }
+ case MOVFR2GR_D:
+ printf_instr("MOVFR2GR_D\t %s: %016lx, %s, %016f\n",
+ Registers::Name(rd_reg()), rd(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetResult(rd_reg(), get_fpu_register(fj_reg()));
+ break;
+ case MOVFRH2GR_S:
+ printf_instr("MOVFRH2GR_S\t %s: %016lx, %s, %016f\n",
+ Registers::Name(rd_reg()), rd(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetResult(rd_reg(), get_fpu_register_hi_word(fj_reg()));
+ break;
+ case MOVGR2FCSR: {
+ printf_instr("MOVGR2FCSR\t fcsr: %016x, %s, %016lx\n", FCSR_,
+ Registers::Name(rj_reg()), rj());
+ // fcsr could be 0-3
+ CHECK_LT(rd_reg(), 4);
+ FCSR_ = static_cast<uint32_t>(rj());
+ TraceRegWr(FCSR_);
+ break;
+ }
+ case MOVFCSR2GR: {
+ printf_instr("MOVFCSR2GR\t %s, %016lx, FCSR: %016x\n",
+ Registers::Name(rd_reg()), rd(), FCSR_);
+ // fcsr could be 0-3
+ CHECK_LT(rj_reg(), 4);
+ SetResult(rd_reg(), FCSR_);
+ break;
+ }
+ case FCVT_S_D:
+ printf_instr("FCVT_S_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUFloatResult(fd_reg(), static_cast<float>(fj_double()));
+ break;
+ case FCVT_D_S:
+ printf_instr("FCVT_D_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(fj_float()));
+ break;
+ case FTINTRM_W_S: {
+ printf_instr("FTINTRM_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::floor(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRM_W_D: {
+ printf_instr("FTINTRM_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::floor(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRM_L_S: {
+ printf_instr("FTINTRM_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::floor(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRM_L_D: {
+ printf_instr("FTINTRM_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::floor(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRP_W_S: {
+ printf_instr("FTINTRP_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::ceil(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRP_W_D: {
+ printf_instr("FTINTRP_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::ceil(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRP_L_S: {
+ printf_instr("FTINTRP_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::ceil(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRP_L_D: {
+ printf_instr("FTINTRP_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::ceil(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRZ_W_S: {
+ printf_instr("FTINTRZ_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::trunc(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRZ_W_D: {
+ printf_instr("FTINTRZ_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::trunc(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRZ_L_S: {
+ printf_instr("FTINTRZ_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::trunc(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRZ_L_D: {
+ printf_instr("FTINTRZ_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::trunc(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRNE_W_S: {
+ printf_instr("FTINTRNE_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::floor(fj + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRNE_W_D: {
+ printf_instr("FTINTRNE_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::floor(fj + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRNE_L_S: {
+ printf_instr("FTINTRNE_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::floor(fj + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRNE_L_D: {
+ printf_instr("FTINTRNE_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::floor(fj + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINT_W_S: {
+ printf_instr("FTINT_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded;
+ int32_t result;
+ round_according_to_fcsr(fj, &rounded, &result);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINT_W_D: {
+ printf_instr("FTINT_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded;
+ int32_t result;
+ round_according_to_fcsr(fj, &rounded, &result);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINT_L_S: {
+ printf_instr("FTINT_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded;
+ int64_t result;
+ round64_according_to_fcsr(fj, &rounded, &result);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINT_L_D: {
+ printf_instr("FTINT_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded;
+ int64_t result;
+ round64_according_to_fcsr(fj, &rounded, &result);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FFINT_S_W: {
+ alu_out = get_fpu_register_signed_word(fj_reg());
+ printf_instr("FFINT_S_W\t %s: %016f, %s, %016x\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), static_cast<int>(alu_out));
+ SetFPUFloatResult(fd_reg(), static_cast<float>(alu_out));
+ break;
+ }
+ case FFINT_S_L: {
+ alu_out = get_fpu_register(fj_reg());
+ printf_instr("FFINT_S_L\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), alu_out);
+ SetFPUFloatResult(fd_reg(), static_cast<float>(alu_out));
+ break;
+ }
+ case FFINT_D_W: {
+ alu_out = get_fpu_register_signed_word(fj_reg());
+ printf_instr("FFINT_D_W\t %s: %016f, %s, %016x\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), static_cast<int>(alu_out));
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(alu_out));
+ break;
+ }
+ case FFINT_D_L: {
+ alu_out = get_fpu_register(fj_reg());
+ printf_instr("FFINT_D_L\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), alu_out);
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(alu_out));
+ break;
+ }
+ case FRINT_S: {
+ printf_instr("FRINT_S\t %s: %016f, %s, %016f mode : ",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float result, temp_result;
+ double temp;
+ float upper = std::ceil(fj);
+ float lower = std::floor(fj);
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ printf_instr(" kRoundToNearest\n");
+ if (upper - fj < fj - lower) {
+ result = upper;
+ } else if (upper - fj > fj - lower) {
+ result = lower;
+ } else {
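+            // Tie: round to the even neighbour (keep upper only when it is
+            // even, otherwise take lower).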
+ temp_result = upper / 2;
+ float reminder = std::modf(temp_result, &temp);
+ if (reminder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ printf_instr(" kRoundToZero\n");
+ result = (fj > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ printf_instr(" kRoundToPlusInf\n");
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ printf_instr(" kRoundToMinusInf\n");
+ result = lower;
+ break;
+ }
+ SetFPUFloatResult(fd_reg(), result);
+ set_fcsr_bit(kFCSRInexactCauseBit, result != fj);
+ break;
+ }
+ case FRINT_D: {
+ printf_instr("FRINT_D\t %s: %016f, %s, %016f mode : ",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double result, temp, temp_result;
+ double upper = std::ceil(fj);
+ double lower = std::floor(fj);
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ printf_instr(" kRoundToNearest\n");
+ if (upper - fj < fj - lower) {
+ result = upper;
+ } else if (upper - fj > fj - lower) {
+ result = lower;
+ } else {
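+            // Tie: round to the even neighbour (keep upper only when it is
+            // even, otherwise take lower).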
+ temp_result = upper / 2;
+ double reminder = std::modf(temp_result, &temp);
+ if (reminder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ printf_instr(" kRoundToZero\n");
+ result = (fj > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ printf_instr(" kRoundToPlusInf\n");
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ printf_instr(" kRoundToMinusInf\n");
+ result = lower;
+ break;
+ }
+ SetFPUDoubleResult(fd_reg(), result);
+ set_fcsr_bit(kFCSRInexactCauseBit, result != fj);
+ break;
+ }
+ case MOVFR2CF:
+ printf("Sim UNIMPLEMENTED: MOVFR2CF\n");
+ UNIMPLEMENTED();
+ case MOVCF2FR:
+ printf("Sim UNIMPLEMENTED: MOVCF2FR\n");
+ UNIMPLEMENTED();
+ case MOVGR2CF:
+ printf_instr("MOVGR2CF\t FCC%d, %s: %016lx\n", cd_reg(),
+ Registers::Name(rj_reg()), rj());
+ set_cf_register(cd_reg(), rj() & 1);
+ break;
+ case MOVCF2GR:
+ printf_instr("MOVCF2GR\t %s: %016lx, FCC%d\n", Registers::Name(rd_reg()),
+ rd(), cj_reg());
+ SetResult(rd_reg(), cj());
+ break;
+ case FRECIP_S:
+ printf("Sim UNIMPLEMENTED: FRECIP_S\n");
+ UNIMPLEMENTED();
+ case FRECIP_D:
+ printf("Sim UNIMPLEMENTED: FRECIP_D\n");
+ UNIMPLEMENTED();
+ case FRSQRT_S:
+ printf("Sim UNIMPLEMENTED: FRSQRT_S\n");
+ UNIMPLEMENTED();
+ case FRSQRT_D:
+ printf("Sim UNIMPLEMENTED: FRSQRT_D\n");
+ UNIMPLEMENTED();
+ case FCLASS_S:
+ printf("Sim UNIMPLEMENTED: FCLASS_S\n");
+ UNIMPLEMENTED();
+ case FCLASS_D:
+ printf("Sim UNIMPLEMENTED: FCLASS_D\n");
+ UNIMPLEMENTED();
+ case FLOGB_S:
+ printf("Sim UNIMPLEMENTED: FLOGB_S\n");
+ UNIMPLEMENTED();
+ case FLOGB_D:
+ printf("Sim UNIMPLEMENTED: FLOGB_D\n");
+ UNIMPLEMENTED();
+ case CLO_W:
+ printf("Sim UNIMPLEMENTED: CLO_W\n");
+ UNIMPLEMENTED();
+ case CTO_W:
+ printf("Sim UNIMPLEMENTED: CTO_W\n");
+ UNIMPLEMENTED();
+ case CLO_D:
+ printf("Sim UNIMPLEMENTED: CLO_D\n");
+ UNIMPLEMENTED();
+ case CTO_D:
+ printf("Sim UNIMPLEMENTED: CTO_D\n");
+ UNIMPLEMENTED();
+    // Unimplemented opcodes have already raised an error above, so any opcode
+    // that reaches the default case is an invalid Op22 encoding.
+ default:
+ UNREACHABLE();
+ }
+}
+
+// Executes the current instruction.
+void Simulator::InstructionDecode(Instruction* instr) {
+ if (v8::internal::FLAG_check_icache) {
+ CheckICache(i_cache(), instr);
+ }
+ pc_modified_ = false;
+
+ v8::base::EmbeddedVector<char, 256> buffer;
+
+ if (::v8::internal::FLAG_trace_sim) {
+ base::SNPrintF(trace_buf_, " ");
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+ }
+
+ static int instr_count = 0;
+ USE(instr_count);
+ instr_ = instr;
+ printf_instr("\nInstr%3d: %08x, PC: %016lx\t", instr_count++,
+ instr_.Bits(31, 0), get_pc());
+ switch (instr_.InstructionType()) {
+ case Instruction::kOp6Type:
+ DecodeTypeOp6();
+ break;
+ case Instruction::kOp7Type:
+ DecodeTypeOp7();
+ break;
+ case Instruction::kOp8Type:
+ DecodeTypeOp8();
+ break;
+ case Instruction::kOp10Type:
+ DecodeTypeOp10();
+ break;
+ case Instruction::kOp12Type:
+ DecodeTypeOp12();
+ break;
+ case Instruction::kOp14Type:
+ DecodeTypeOp14();
+ break;
+ case Instruction::kOp17Type:
+ DecodeTypeOp17();
+ break;
+ case Instruction::kOp22Type:
+ DecodeTypeOp22();
+ break;
+ default: {
+ printf("instr_: %x\n", instr_.Bits(31, 0));
+ UNREACHABLE();
+ }
+ }
+
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(" 0x%08" PRIxPTR " %-44s %s\n",
+ reinterpret_cast<intptr_t>(instr), buffer.begin(),
+ trace_buf_.begin());
+ }
+
+ if (!pc_modified_) {
+ set_register(pc, reinterpret_cast<int64_t>(instr) + kInstrSize);
+ }
+}
+
+void Simulator::Execute() {
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int64_t program_counter = get_pc();
+ if (::v8::internal::FLAG_stop_sim_at == 0) {
+ // Fast version of the dispatch loop without checking whether the simulator
+ // should be stopping at a particular executed instruction.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ InstructionDecode(instr);
+ program_counter = get_pc();
+ }
+ } else {
+ // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
+ // we reach the particular instruction count.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ if (icount_ == static_cast<int64_t>(::v8::internal::FLAG_stop_sim_at)) {
+ Loong64Debugger dbg(this);
+ dbg.Debug();
+ } else {
+ InstructionDecode(instr);
+ }
+ program_counter = get_pc();
+ }
+ }
+}
+
+void Simulator::CallInternal(Address entry) {
+ // Adjust JS-based stack limit to C-based stack limit.
+ isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
+ // Prepare to execute the code at entry.
+ set_register(pc, static_cast<int64_t>(entry));
+  // Put down a marker for the end of the simulation. The simulator will stop
+  // simulating when the PC reaches this value. By saving the "end simulation"
+  // value into ra, the simulation stops when returning to this call point.
+ set_register(ra, end_sim_pc);
+
+ // Remember the values of callee-saved registers.
+ int64_t s0_val = get_register(s0);
+ int64_t s1_val = get_register(s1);
+ int64_t s2_val = get_register(s2);
+ int64_t s3_val = get_register(s3);
+ int64_t s4_val = get_register(s4);
+ int64_t s5_val = get_register(s5);
+ int64_t s6_val = get_register(s6);
+ int64_t s7_val = get_register(s7);
+ int64_t s8_val = get_register(s8);
+ int64_t gp_val = get_register(gp);
+ int64_t sp_val = get_register(sp);
+ int64_t tp_val = get_register(tp);
+ int64_t fp_val = get_register(fp);
+
+  // Set up the callee-saved registers with a known value so that we can check
+  // that they are preserved properly across JS execution.
+ int64_t callee_saved_value = icount_;
+ set_register(s0, callee_saved_value);
+ set_register(s1, callee_saved_value);
+ set_register(s2, callee_saved_value);
+ set_register(s3, callee_saved_value);
+ set_register(s4, callee_saved_value);
+ set_register(s5, callee_saved_value);
+ set_register(s6, callee_saved_value);
+ set_register(s7, callee_saved_value);
+ set_register(s8, callee_saved_value);
+ set_register(gp, callee_saved_value);
+ set_register(tp, callee_saved_value);
+ set_register(fp, callee_saved_value);
+
+ // Start the simulation.
+ Execute();
+
+ // Check that the callee-saved registers have been preserved.
+ CHECK_EQ(callee_saved_value, get_register(s0));
+ CHECK_EQ(callee_saved_value, get_register(s1));
+ CHECK_EQ(callee_saved_value, get_register(s2));
+ CHECK_EQ(callee_saved_value, get_register(s3));
+ CHECK_EQ(callee_saved_value, get_register(s4));
+ CHECK_EQ(callee_saved_value, get_register(s5));
+ CHECK_EQ(callee_saved_value, get_register(s6));
+ CHECK_EQ(callee_saved_value, get_register(s7));
+ CHECK_EQ(callee_saved_value, get_register(s8));
+ CHECK_EQ(callee_saved_value, get_register(gp));
+ CHECK_EQ(callee_saved_value, get_register(tp));
+ CHECK_EQ(callee_saved_value, get_register(fp));
+
+ // Restore callee-saved registers with the original value.
+ set_register(s0, s0_val);
+ set_register(s1, s1_val);
+ set_register(s2, s2_val);
+ set_register(s3, s3_val);
+ set_register(s4, s4_val);
+ set_register(s5, s5_val);
+ set_register(s6, s6_val);
+ set_register(s7, s7_val);
+ set_register(s8, s8_val);
+ set_register(gp, gp_val);
+ set_register(sp, sp_val);
+ set_register(tp, tp_val);
+ set_register(fp, fp_val);
+}
+
+intptr_t Simulator::CallImpl(Address entry, int argument_count,
+ const intptr_t* arguments) {
+ constexpr int kRegisterPassedArguments = 8;
+ // Set up arguments.
+
+ int reg_arg_count = std::min(kRegisterPassedArguments, argument_count);
+ if (reg_arg_count > 0) set_register(a0, arguments[0]);
+ if (reg_arg_count > 1) set_register(a1, arguments[1]);
+ if (reg_arg_count > 2) set_register(a2, arguments[2]);
+ if (reg_arg_count > 3) set_register(a3, arguments[3]);
+ if (reg_arg_count > 4) set_register(a4, arguments[4]);
+ if (reg_arg_count > 5) set_register(a5, arguments[5]);
+ if (reg_arg_count > 6) set_register(a6, arguments[6]);
+ if (reg_arg_count > 7) set_register(a7, arguments[7]);
+
+ // Remaining arguments passed on stack.
+ int64_t original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int stack_args_count = argument_count - reg_arg_count;
+ int stack_args_size = stack_args_count * sizeof(*arguments);
+ int64_t entry_stack = original_stack - stack_args_size;
+
+ if (base::OS::ActivationFrameAlignment() != 0) {
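+    // Align the stack pointer downwards; ActivationFrameAlignment() is
+    // assumed to be a power of two here.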
+ entry_stack &= -base::OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ memcpy(stack_argument, arguments + reg_arg_count,
+ stack_args_count * sizeof(*arguments));
+ set_register(sp, entry_stack);
+
+ CallInternal(entry);
+
+ // Pop stack passed arguments.
+ CHECK_EQ(entry_stack, get_register(sp));
+ set_register(sp, original_stack);
+
+ return get_register(a0);
+}
+
+double Simulator::CallFP(Address entry, double d0, double d1) {
+ const FPURegister fparg2 = f1;
+ set_fpu_register_double(f0, d0);
+ set_fpu_register_double(fparg2, d1);
+ CallInternal(entry);
+ return get_fpu_register_double(f0);
+}
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ int64_t new_sp = get_register(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ set_register(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t Simulator::PopAddress() {
+ int64_t current_sp = get_register(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ set_register(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+Simulator::LocalMonitor::LocalMonitor()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ size_(TransactionSize::None) {}
+
+void Simulator::LocalMonitor::Clear() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+ size_ = TransactionSize::None;
+}
+
+void Simulator::LocalMonitor::NotifyLoad() {
+ if (access_state_ == MonitorAccess::RMW) {
+    // A load that is not load-linked could clear the local monitor, so the
+    // strictest behavior is to clear the local monitor unconditionally on
+    // every load.
+ Clear();
+ }
+}
+
+void Simulator::LocalMonitor::NotifyLoadLinked(uintptr_t addr,
+ TransactionSize size) {
+ access_state_ = MonitorAccess::RMW;
+ tagged_addr_ = addr;
+ size_ = size;
+}
+
+void Simulator::LocalMonitor::NotifyStore() {
+ if (access_state_ == MonitorAccess::RMW) {
+    // A non-exclusive store could clear the local monitor, so the strictest
+    // behavior is to clear the local monitor unconditionally on every store.
+ Clear();
+ }
+}
+
+bool Simulator::LocalMonitor::NotifyStoreConditional(uintptr_t addr,
+ TransactionSize size) {
+ if (access_state_ == MonitorAccess::RMW) {
+ if (addr == tagged_addr_ && size_ == size) {
+ Clear();
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ DCHECK(access_state_ == MonitorAccess::Open);
+ return false;
+ }
+}
+
+Simulator::GlobalMonitor::LinkedAddress::LinkedAddress()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ next_(nullptr),
+ prev_(nullptr),
+ failure_counter_(0) {}
+
+void Simulator::GlobalMonitor::LinkedAddress::Clear_Locked() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+}
+
+void Simulator::GlobalMonitor::LinkedAddress::NotifyLoadLinked_Locked(
+ uintptr_t addr) {
+ access_state_ = MonitorAccess::RMW;
+ tagged_addr_ = addr;
+}
+
+void Simulator::GlobalMonitor::LinkedAddress::NotifyStore_Locked() {
+ if (access_state_ == MonitorAccess::RMW) {
+    // A non-exclusive store could clear the global monitor, so the strictest
+    // behavior is to clear the global monitors unconditionally on every store.
+ Clear_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::LinkedAddress::NotifyStoreConditional_Locked(
+ uintptr_t addr, bool is_requesting_thread) {
+ if (access_state_ == MonitorAccess::RMW) {
+ if (is_requesting_thread) {
+ if (addr == tagged_addr_) {
+ Clear_Locked();
+ // Introduce occasional sc/scd failures. This is to simulate the
+ // behavior of hardware, which can randomly fail due to background
+ // cache evictions.
+ if (failure_counter_++ >= kMaxFailureCounter) {
+ failure_counter_ = 0;
+ return false;
+ } else {
+ return true;
+ }
+ }
+ } else if ((addr & kExclusiveTaggedAddrMask) ==
+ (tagged_addr_ & kExclusiveTaggedAddrMask)) {
+      // Check the masked addresses when responding to a successful
+      // store-conditional by another thread so that the implementation is more
+      // conservative (i.e., the granularity of locking is as large as
+      // possible).
+ Clear_Locked();
+ return false;
+ }
+ }
+ return false;
+}
+
+void Simulator::GlobalMonitor::NotifyLoadLinked_Locked(
+ uintptr_t addr, LinkedAddress* linked_address) {
+ linked_address->NotifyLoadLinked_Locked(addr);
+ PrependProcessor_Locked(linked_address);
+}
+
+void Simulator::GlobalMonitor::NotifyStore_Locked(
+ LinkedAddress* linked_address) {
+ // Notify each thread of the store operation.
+ for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
+ iter->NotifyStore_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::NotifyStoreConditional_Locked(
+ uintptr_t addr, LinkedAddress* linked_address) {
+ DCHECK(IsProcessorInLinkedList_Locked(linked_address));
+ if (linked_address->NotifyStoreConditional_Locked(addr, true)) {
+ // Notify the other processors that this StoreConditional succeeded.
+ for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
+ if (iter != linked_address) {
+ iter->NotifyStoreConditional_Locked(addr, false);
+ }
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
+ LinkedAddress* linked_address) const {
+ return head_ == linked_address || linked_address->next_ ||
+ linked_address->prev_;
+}
+
+void Simulator::GlobalMonitor::PrependProcessor_Locked(
+ LinkedAddress* linked_address) {
+ if (IsProcessorInLinkedList_Locked(linked_address)) {
+ return;
+ }
+
+ if (head_) {
+ head_->prev_ = linked_address;
+ }
+ linked_address->prev_ = nullptr;
+ linked_address->next_ = head_;
+ head_ = linked_address;
+}
+
+void Simulator::GlobalMonitor::RemoveLinkedAddress(
+ LinkedAddress* linked_address) {
+ base::MutexGuard lock_guard(&mutex);
+ if (!IsProcessorInLinkedList_Locked(linked_address)) {
+ return;
+ }
+
+ if (linked_address->prev_) {
+ linked_address->prev_->next_ = linked_address->next_;
+ } else {
+ head_ = linked_address->next_;
+ }
+ if (linked_address->next_) {
+ linked_address->next_->prev_ = linked_address->prev_;
+ }
+ linked_address->prev_ = nullptr;
+ linked_address->next_ = nullptr;
+}
+
+#undef SScanF
+
+} // namespace internal
+} // namespace v8
+
+#endif // USE_SIMULATOR
diff --git a/deps/v8/src/execution/loong64/simulator-loong64.h b/deps/v8/src/execution/loong64/simulator-loong64.h
new file mode 100644
index 0000000000..b9e97b93b2
--- /dev/null
+++ b/deps/v8/src/execution/loong64/simulator-loong64.h
@@ -0,0 +1,647 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Declares a Simulator for loongisa instructions if we are not generating a
+// native loongisa binary. This Simulator allows us to run and debug loongisa
+// code generation on regular desktop machines. V8 calls into generated code via
+// the GeneratedCode wrapper, which will either start execution in the Simulator
+// or forward to the real entry on a loongisa HW platform.
+
+#ifndef V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
+#define V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
+
+// globals.h defines USE_SIMULATOR.
+#include "src/common/globals.h"
+
+template <typename T>
+int Compare(const T& a, const T& b) {
+ if (a == b)
+ return 0;
+ else if (a < b)
+ return -1;
+ else
+ return 1;
+}
+
+// Returns the negative absolute value of its argument.
+template <typename T,
+ typename = typename std::enable_if<std::is_signed<T>::value>::type>
+T Nabs(T a) {
+ return a < 0 ? a : -a;
+}
+
+#if defined(USE_SIMULATOR)
+// Running with a simulator.
+
+#include "src/base/hashmap.h"
+#include "src/base/strings.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/execution/simulator-base.h"
+#include "src/utils/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+ char* ValidityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* CachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+class SimInstructionBase : public InstructionBase {
+ public:
+ Type InstructionType() const { return type_; }
+ inline Instruction* instr() const { return instr_; }
+ inline int32_t operand() const { return operand_; }
+
+ protected:
+ SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
+ explicit SimInstructionBase(Instruction* instr) {}
+
+ int32_t operand_;
+ Instruction* instr_;
+ Type type_;
+
+ private:
+ DISALLOW_ASSIGN(SimInstructionBase);
+};
+
+class SimInstruction : public InstructionGetters<SimInstructionBase> {
+ public:
+ SimInstruction() {}
+
+ explicit SimInstruction(Instruction* instr) { *this = instr; }
+
+ SimInstruction& operator=(Instruction* instr) {
+ operand_ = *reinterpret_cast<const int32_t*>(instr);
+ instr_ = instr;
+ type_ = InstructionBase::InstructionType();
+ DCHECK(reinterpret_cast<void*>(&operand_) == this);
+ return *this;
+ }
+};
+
+class Simulator : public SimulatorBase {
+ public:
+ friend class Loong64Debugger;
+
+ // Registers are declared in order.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ ra,
+ gp,
+ sp,
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ t0,
+ t1,
+ t2,
+ t3,
+ t4,
+ t5,
+ t6,
+ t7,
+ t8,
+ tp,
+ fp,
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ s8,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ v0 = a0,
+ v1 = a1
+ };
+
+ // Condition flag registers.
+ enum CFRegister {
+ fcc0,
+ fcc1,
+ fcc2,
+ fcc3,
+ fcc4,
+ fcc5,
+ fcc6,
+ fcc7,
+ kNumCFRegisters
+ };
+
+ // Floating point registers.
+ enum FPURegister {
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ kNumFPURegisters
+ };
+
+ explicit Simulator(Isolate* isolate);
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
+
+  // Accessors for register state. Reading the pc value adheres to the LOONG64
+  // architecture specification and is off by 8 from the currently executing
+  // instruction.
+ void set_register(int reg, int64_t value);
+ void set_register_word(int reg, int32_t value);
+ void set_dw_register(int dreg, const int* dbl);
+ V8_EXPORT_PRIVATE int64_t get_register(int reg) const;
+ double get_double_from_register_pair(int reg);
+ // Same for FPURegisters.
+ void set_fpu_register(int fpureg, int64_t value);
+ void set_fpu_register_word(int fpureg, int32_t value);
+ void set_fpu_register_hi_word(int fpureg, int32_t value);
+ void set_fpu_register_float(int fpureg, float value);
+ void set_fpu_register_double(int fpureg, double value);
+ void set_fpu_register_invalid_result64(float original, float rounded);
+ void set_fpu_register_invalid_result(float original, float rounded);
+ void set_fpu_register_word_invalid_result(float original, float rounded);
+ void set_fpu_register_invalid_result64(double original, double rounded);
+ void set_fpu_register_invalid_result(double original, double rounded);
+ void set_fpu_register_word_invalid_result(double original, double rounded);
+ int64_t get_fpu_register(int fpureg) const;
+ int32_t get_fpu_register_word(int fpureg) const;
+ int32_t get_fpu_register_signed_word(int fpureg) const;
+ int32_t get_fpu_register_hi_word(int fpureg) const;
+ float get_fpu_register_float(int fpureg) const;
+ double get_fpu_register_double(int fpureg) const;
+ void set_cf_register(int cfreg, bool value);
+ bool get_cf_register(int cfreg) const;
+ void set_fcsr_rounding_mode(FPURoundingMode mode);
+ unsigned int get_fcsr_rounding_mode();
+ void set_fcsr_bit(uint32_t cc, bool value);
+ bool test_fcsr_bit(uint32_t cc);
+ bool set_fcsr_round_error(double original, double rounded);
+ bool set_fcsr_round64_error(double original, double rounded);
+ bool set_fcsr_round_error(float original, float rounded);
+ bool set_fcsr_round64_error(float original, float rounded);
+ void round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int);
+ void round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int);
+ void round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int);
+ void round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int);
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int64_t value);
+ V8_EXPORT_PRIVATE int64_t get_pc() const;
+
+ Address get_sp() const { return static_cast<Address>(get_register(sp)); }
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit(uintptr_t c_limit) const;
+
+ // Executes LOONG64 instructions until the PC reaches end_sim_pc.
+ void Execute();
+
+ template <typename Return, typename... Args>
+ Return Call(Address entry, Args... args) {
+ return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+ }
+
+ // Alternative: call a 2-argument double function.
+ double CallFP(Address entry, double d0, double d1);
+
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
+ // Debugger input.
+ void set_last_debugger_input(char* input);
+ char* last_debugger_input() { return last_debugger_input_; }
+
+ // Redirection support.
+ static void SetRedirectInstruction(Instruction* instruction);
+
+ // ICache checking.
+ static bool ICacheMatch(void* one, void* two);
+ static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+ size_t size);
+
+ // Returns true if pc register contains one of the 'special_values' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum special_values {
+    // Known bad pc value to ensure that the simulator does not execute
+    // without being properly set up.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
+
+ V8_EXPORT_PRIVATE intptr_t CallImpl(Address entry, int argument_count,
+ const intptr_t* arguments);
+
+ // Unsupported instructions use Format to print an error and stop execution.
+ void Format(Instruction* instr, const char* format);
+
+ // Helpers for data value tracing.
+ enum TraceType {
+ BYTE,
+ HALF,
+ WORD,
+ DWORD,
+ FLOAT,
+ DOUBLE,
+ FLOAT_DOUBLE,
+ WORD_DWORD
+ };
+
+ // Read and write memory.
+ inline uint32_t ReadBU(int64_t addr);
+ inline int32_t ReadB(int64_t addr);
+ inline void WriteB(int64_t addr, uint8_t value);
+ inline void WriteB(int64_t addr, int8_t value);
+
+ inline uint16_t ReadHU(int64_t addr, Instruction* instr);
+ inline int16_t ReadH(int64_t addr, Instruction* instr);
+ // Note: Overloaded on the sign of the value.
+ inline void WriteH(int64_t addr, uint16_t value, Instruction* instr);
+ inline void WriteH(int64_t addr, int16_t value, Instruction* instr);
+
+ inline uint32_t ReadWU(int64_t addr, Instruction* instr);
+ inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD);
+ inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
+ void WriteConditionalW(int64_t addr, int32_t value, Instruction* instr,
+ int32_t rt_reg);
+ inline int64_t Read2W(int64_t addr, Instruction* instr);
+ inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
+ inline void WriteConditional2W(int64_t addr, int64_t value,
+ Instruction* instr, int32_t rt_reg);
+
+ inline double ReadD(int64_t addr, Instruction* instr);
+ inline void WriteD(int64_t addr, double value, Instruction* instr);
+
+ template <typename T>
+ T ReadMem(int64_t addr, Instruction* instr);
+ template <typename T>
+ void WriteMem(int64_t addr, T value, Instruction* instr);
+
+ // Helper for debugging memory access.
+ inline void DieOrDebug();
+
+ void TraceRegWr(int64_t value, TraceType t = DWORD);
+ void TraceMemWr(int64_t addr, int64_t value, TraceType t);
+ void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD);
+ template <typename T>
+ void TraceMemRd(int64_t addr, T value);
+ template <typename T>
+ void TraceMemWr(int64_t addr, T value);
+
+ SimInstruction instr_;
+
+  // Execution is dispatched based on the instruction type.
+ void DecodeTypeOp6();
+ void DecodeTypeOp7();
+ void DecodeTypeOp8();
+ void DecodeTypeOp10();
+ void DecodeTypeOp12();
+ void DecodeTypeOp14();
+ void DecodeTypeOp17();
+ void DecodeTypeOp22();
+
+ inline int32_t rj_reg() const { return instr_.RjValue(); }
+ inline int64_t rj() const { return get_register(rj_reg()); }
+ inline uint64_t rj_u() const {
+ return static_cast<uint64_t>(get_register(rj_reg()));
+ }
+ inline int32_t rk_reg() const { return instr_.RkValue(); }
+ inline int64_t rk() const { return get_register(rk_reg()); }
+ inline uint64_t rk_u() const {
+ return static_cast<uint64_t>(get_register(rk_reg()));
+ }
+ inline int32_t rd_reg() const { return instr_.RdValue(); }
+ inline int64_t rd() const { return get_register(rd_reg()); }
+ inline uint64_t rd_u() const {
+ return static_cast<uint64_t>(get_register(rd_reg()));
+ }
+ inline int32_t fa_reg() const { return instr_.FaValue(); }
+ inline float fa_float() const { return get_fpu_register_float(fa_reg()); }
+ inline double fa_double() const { return get_fpu_register_double(fa_reg()); }
+ inline int32_t fj_reg() const { return instr_.FjValue(); }
+ inline float fj_float() const { return get_fpu_register_float(fj_reg()); }
+ inline double fj_double() const { return get_fpu_register_double(fj_reg()); }
+ inline int32_t fk_reg() const { return instr_.FkValue(); }
+ inline float fk_float() const { return get_fpu_register_float(fk_reg()); }
+ inline double fk_double() const { return get_fpu_register_double(fk_reg()); }
+ inline int32_t fd_reg() const { return instr_.FdValue(); }
+ inline float fd_float() const { return get_fpu_register_float(fd_reg()); }
+ inline double fd_double() const { return get_fpu_register_double(fd_reg()); }
+ inline int32_t cj_reg() const { return instr_.CjValue(); }
+ inline bool cj() const { return get_cf_register(cj_reg()); }
+ inline int32_t cd_reg() const { return instr_.CdValue(); }
+ inline bool cd() const { return get_cf_register(cd_reg()); }
+ inline int32_t ca_reg() const { return instr_.CaValue(); }
+ inline bool ca() const { return get_cf_register(ca_reg()); }
+ inline uint32_t sa2() const { return instr_.Sa2Value(); }
+ inline uint32_t sa3() const { return instr_.Sa3Value(); }
+ inline uint32_t ui5() const { return instr_.Ui5Value(); }
+ inline uint32_t ui6() const { return instr_.Ui6Value(); }
+ inline uint32_t lsbw() const { return instr_.LsbwValue(); }
+ inline uint32_t msbw() const { return instr_.MsbwValue(); }
+ inline uint32_t lsbd() const { return instr_.LsbdValue(); }
+ inline uint32_t msbd() const { return instr_.MsbdValue(); }
+ inline uint32_t cond() const { return instr_.CondValue(); }
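+  // The si* helpers sign-extend the immediate fields by shifting them up and
+  // arithmetically shifting them back down.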
+ inline int32_t si12() const { return (instr_.Si12Value() << 20) >> 20; }
+ inline uint32_t ui12() const { return instr_.Ui12Value(); }
+ inline int32_t si14() const { return (instr_.Si14Value() << 18) >> 18; }
+ inline int32_t si16() const { return (instr_.Si16Value() << 16) >> 16; }
+ inline int32_t si20() const { return (instr_.Si20Value() << 12) >> 12; }
+
+ inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
+ set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
+ }
+
+ inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) {
+ set_fpu_register_word(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), WORD);
+ }
+
+ inline void SetFPUWordResult2(int32_t fd_reg, int32_t alu_out) {
+ set_fpu_register_word(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg));
+ }
+
+ inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) {
+ set_fpu_register(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg));
+ }
+
+ inline void SetFPUResult2(int32_t fd_reg, int64_t alu_out) {
+ set_fpu_register(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
+ }
+
+ inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) {
+ set_fpu_register_float(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), FLOAT);
+ }
+
+ inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) {
+ set_fpu_register_double(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
+ }
+
+ // Used for breakpoints.
+ void SoftwareInterrupt();
+
+ // Stop helper functions.
+ bool IsWatchpoint(uint64_t code);
+ void PrintWatchpoint(uint64_t code);
+ void HandleStop(uint64_t code, Instruction* instr);
+ bool IsStopInstruction(Instruction* instr);
+ bool IsEnabledStop(uint64_t code);
+ void EnableStop(uint64_t code);
+ void DisableStop(uint64_t code);
+ void IncreaseStopCounter(uint64_t code);
+ void PrintStopInfo(uint64_t code);
+
+ // Executes one instruction.
+ void InstructionDecode(Instruction* instr);
+
+ // ICache.
+ static void CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr);
+ static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
+ size_t size);
+ static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page);
+
+ enum Exception {
+ none,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions
+ };
+
+ // Exceptions.
+ void SignalException(Exception e);
+
+ // Handle arguments and return value for runtime FP functions.
+ void GetFpArgs(double* x, double* y, int32_t* z);
+ void SetFpResult(const double& result);
+
+ void CallInternal(Address entry);
+
+ // Architecture state.
+ // Registers.
+ int64_t registers_[kNumSimuRegisters];
+ // Floating point Registers.
+ int64_t FPUregisters_[kNumFPURegisters];
+ // Condition flags Registers.
+ bool CFregisters_[kNumCFRegisters];
+ // FPU control register.
+ uint32_t FCSR_;
+
+ // Simulator support.
+ // Allocate 1MB for stack.
+ size_t stack_size_;
+ char* stack_;
+ bool pc_modified_;
+ int64_t icount_;
+ int break_count_;
+ base::EmbeddedVector<char, 128> trace_buf_;
+
+ // Debugger input.
+ char* last_debugger_input_;
+
+ v8::internal::Isolate* isolate_;
+
+ // Registered breakpoints.
+ Instruction* break_pc_;
+ Instr break_instr_;
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1 << 31;
+
+  // A stop is enabled, meaning the simulator will stop when it reaches the
+  // instruction, if bit 31 of watched_stops_[code].count is unset.
+  // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
+  // the breakpoint has been hit or stepped through.
+ struct StopCountAndDesc {
+ uint32_t count;
+ char* desc;
+ };
+ StopCountAndDesc watched_stops_[kMaxStopCode + 1];
+
+ // Synchronization primitives.
+ enum class MonitorAccess {
+ Open,
+ RMW,
+ };
+
+ enum class TransactionSize {
+ None = 0,
+ Word = 4,
+ DoubleWord = 8,
+ };
+
+  // The least-significant bits of the address are ignored. The number of bits
+  // is implementation-defined, between 3 and the minimum page size.
+ static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
+
+ class LocalMonitor {
+ public:
+ LocalMonitor();
+
+ // These functions manage the state machine for the local monitor, but do
+ // not actually perform loads and stores. NotifyStoreConditional only
+ // returns true if the store conditional is allowed; the global monitor will
+ // still have to be checked to see whether the memory should be updated.
+ void NotifyLoad();
+ void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
+ void NotifyStore();
+ bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);
+
+ private:
+ void Clear();
+
+ MonitorAccess access_state_;
+ uintptr_t tagged_addr_;
+ TransactionSize size_;
+ };
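+
+  // A minimal sketch of the order in which a conditional store consults the
+  // monitors (hypothetical helper body; the real entry points are the
+  // Simulator::{Read,Write}Ex* members):
+  //
+  //   bool allowed =
+  //       local_monitor_.NotifyStoreConditional(addr, TransactionSize::Word);
+  //   if (allowed) {
+  //     base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+  //     allowed = GlobalMonitor::Get()->NotifyStoreConditional_Locked(
+  //         addr, &global_monitor_thread_);
+  //   }
+  //   // Only when both monitors agree is memory actually written.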
+
+ class GlobalMonitor {
+ public:
+ class LinkedAddress {
+ public:
+ LinkedAddress();
+
+ private:
+ friend class GlobalMonitor;
+ // These functions manage the state machine for the global monitor, but do
+ // not actually perform loads and stores.
+ void Clear_Locked();
+ void NotifyLoadLinked_Locked(uintptr_t addr);
+ void NotifyStore_Locked();
+ bool NotifyStoreConditional_Locked(uintptr_t addr,
+ bool is_requesting_thread);
+
+ MonitorAccess access_state_;
+ uintptr_t tagged_addr_;
+ LinkedAddress* next_;
+ LinkedAddress* prev_;
+      // A scd can fail due to background cache evictions. Rather than
+      // simulating this, we just occasionally make a store conditional fail.
+      // This happens once after every kMaxFailureCounter exclusive stores.
+ static const int kMaxFailureCounter = 5;
+ int failure_counter_;
+ };
+
+ // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
+ base::Mutex mutex;
+
+ void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
+ void NotifyStore_Locked(LinkedAddress* linked_address);
+ bool NotifyStoreConditional_Locked(uintptr_t addr,
+ LinkedAddress* linked_address);
+
+ // Called when the simulator is destroyed.
+ void RemoveLinkedAddress(LinkedAddress* linked_address);
+
+ static GlobalMonitor* Get();
+
+ private:
+ // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
+ GlobalMonitor() = default;
+ friend class base::LeakyObject<GlobalMonitor>;
+
+ bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
+ void PrependProcessor_Locked(LinkedAddress* linked_address);
+
+ LinkedAddress* head_ = nullptr;
+ };
+
+ LocalMonitor local_monitor_;
+ GlobalMonitor::LinkedAddress global_monitor_thread_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // defined(USE_SIMULATOR)
+#endif // V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index ad530e1f2a..2628e7a673 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -106,55 +106,55 @@ void MessageHandler::ReportMessage(Isolate* isolate, const MessageLocation* loc,
Handle<JSMessageObject> message) {
v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
- if (api_message_obj->ErrorLevel() == v8::Isolate::kMessageError) {
- // We are calling into embedder's code which can throw exceptions.
- // Thus we need to save current exception state, reset it to the clean one
- // and ignore scheduled exceptions callbacks can throw.
+ if (api_message_obj->ErrorLevel() != v8::Isolate::kMessageError) {
+ ReportMessageNoExceptions(isolate, loc, message, v8::Local<v8::Value>());
+ return;
+ }
- // We pass the exception object into the message handler callback though.
- Object exception_object = ReadOnlyRoots(isolate).undefined_value();
- if (isolate->has_pending_exception()) {
- exception_object = isolate->pending_exception();
- }
- Handle<Object> exception(exception_object, isolate);
+ // We are calling into embedder's code which can throw exceptions.
+ // Thus we need to save current exception state, reset it to the clean one
+ // and ignore scheduled exceptions callbacks can throw.
- Isolate::ExceptionScope exception_scope(isolate);
- isolate->clear_pending_exception();
- isolate->set_external_caught_exception(false);
+ // We pass the exception object into the message handler callback though.
+ Object exception_object = ReadOnlyRoots(isolate).undefined_value();
+ if (isolate->has_pending_exception()) {
+ exception_object = isolate->pending_exception();
+ }
+ Handle<Object> exception(exception_object, isolate);
- // Turn the exception on the message into a string if it is an object.
- if (message->argument().IsJSObject()) {
- HandleScope scope(isolate);
- Handle<Object> argument(message->argument(), isolate);
+ Isolate::ExceptionScope exception_scope(isolate);
+ isolate->clear_pending_exception();
+ isolate->set_external_caught_exception(false);
- MaybeHandle<Object> maybe_stringified;
- Handle<Object> stringified;
- // Make sure we don't leak uncaught internally generated Error objects.
- if (argument->IsJSError()) {
- maybe_stringified = Object::NoSideEffectsToString(isolate, argument);
- } else {
- v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
- catcher.SetVerbose(false);
- catcher.SetCaptureMessage(false);
+ // Turn the exception on the message into a string if it is an object.
+ if (message->argument().IsJSObject()) {
+ HandleScope scope(isolate);
+ Handle<Object> argument(message->argument(), isolate);
- maybe_stringified = Object::ToString(isolate, argument);
- }
+ MaybeHandle<Object> maybe_stringified;
+ Handle<Object> stringified;
+ // Make sure we don't leak uncaught internally generated Error objects.
+ if (argument->IsJSError()) {
+ maybe_stringified = Object::NoSideEffectsToString(isolate, argument);
+ } else {
+ v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
+ catcher.SetVerbose(false);
+ catcher.SetCaptureMessage(false);
- if (!maybe_stringified.ToHandle(&stringified)) {
- DCHECK(isolate->has_pending_exception());
- isolate->clear_pending_exception();
- isolate->set_external_caught_exception(false);
- stringified =
- isolate->factory()->NewStringFromAsciiChecked("exception");
- }
- message->set_argument(*stringified);
+ maybe_stringified = Object::ToString(isolate, argument);
}
- v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception);
- ReportMessageNoExceptions(isolate, loc, message, api_exception_obj);
- } else {
- ReportMessageNoExceptions(isolate, loc, message, v8::Local<v8::Value>());
+ if (!maybe_stringified.ToHandle(&stringified)) {
+ DCHECK(isolate->has_pending_exception());
+ isolate->clear_pending_exception();
+ isolate->set_external_caught_exception(false);
+ stringified = isolate->factory()->exception_string();
+ }
+ message->set_argument(*stringified);
}
+
+ v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception);
+ ReportMessageNoExceptions(isolate, loc, message, api_exception_obj);
}
void MessageHandler::ReportMessageNoExceptions(
@@ -297,10 +297,14 @@ class V8_NODISCARD PrepareStackTraceScope {
MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Handle<JSObject> error,
Handle<Object> raw_stack) {
+ if (FLAG_correctness_fuzzer_suppressions) {
+ return isolate->factory()->empty_string();
+ }
DCHECK(raw_stack->IsFixedArray());
Handle<FixedArray> elems = Handle<FixedArray>::cast(raw_stack);
const bool in_recursion = isolate->formatting_stack_trace();
+ const bool has_overflowed = i::StackLimitCheck{isolate}.HasOverflowed();
Handle<Context> error_context;
if (!in_recursion && error->GetCreationContext().ToHandle(&error_context)) {
DCHECK(error_context->IsNativeContext());
@@ -318,7 +322,7 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
isolate->RunPrepareStackTraceCallback(error_context, error, sites),
Object);
return result;
- } else {
+ } else if (!has_overflowed) {
Handle<JSFunction> global_error =
handle(error_context->error_function(), isolate);
@@ -359,7 +363,6 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
}
// Otherwise, run our internal formatting logic.
-
IncrementalStringBuilder builder(isolate);
RETURN_ON_EXCEPTION(isolate, AppendErrorString(isolate, error, &builder),
diff --git a/deps/v8/src/execution/messages.h b/deps/v8/src/execution/messages.h
index a945b82299..5a54279647 100644
--- a/deps/v8/src/execution/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -12,6 +12,7 @@
#include <memory>
+#include "include/v8-local-handle.h"
#include "src/base/optional.h"
#include "src/common/message-template.h"
#include "src/handles/handles.h"
diff --git a/deps/v8/src/execution/microtask-queue.h b/deps/v8/src/execution/microtask-queue.h
index e9d40a924f..6091fa3575 100644
--- a/deps/v8/src/execution/microtask-queue.h
+++ b/deps/v8/src/execution/microtask-queue.h
@@ -6,11 +6,12 @@
#define V8_EXECUTION_MICROTASK_QUEUE_H_
#include <stdint.h>
+
#include <memory>
#include <vector>
#include "include/v8-internal.h" // For Address.
-#include "include/v8.h"
+#include "include/v8-microtask-queue.h"
#include "src/base/macros.h"
namespace v8 {
diff --git a/deps/v8/src/execution/mips/simulator-mips.cc b/deps/v8/src/execution/mips/simulator-mips.cc
index c49172a564..64ef946b2d 100644
--- a/deps/v8/src/execution/mips/simulator-mips.cc
+++ b/deps/v8/src/execution/mips/simulator-mips.cc
@@ -150,7 +150,6 @@ bool MipsDebugger::GetValue(const char* desc, int32_t* value) {
} else {
return SScanF(desc, "%i", value) == 1;
}
- return false;
}
bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
@@ -169,7 +168,6 @@ bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
} else {
return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
}
- return false;
}
bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
@@ -2028,7 +2026,6 @@ double Simulator::ReadD(int32_t addr, Instruction* instr) {
PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
- return 0;
}
void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
@@ -2055,7 +2052,6 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
- return 0;
}
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
@@ -2068,7 +2064,6 @@ int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
- return 0;
}
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
@@ -2330,7 +2325,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
switch (redirection->type()) {
@@ -2365,7 +2359,6 @@ void Simulator::SoftwareInterrupt() {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim) {
switch (redirection->type()) {
@@ -2379,7 +2372,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -2930,7 +2922,6 @@ void Simulator::DecodeTypeRegisterDRsType() {
UNSUPPORTED();
}
break;
- break;
}
case TRUNC_L_D: { // Mips32r2 instruction.
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
@@ -4233,7 +4224,6 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
default:
alu_out = 0x12345678;
UNREACHABLE();
- break;
}
}
}
@@ -4271,7 +4261,6 @@ int Simulator::DecodeMsaDataFormat() {
break;
default:
UNREACHABLE();
- break;
}
} else {
int DF[] = {MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD};
@@ -4316,7 +4305,6 @@ int Simulator::DecodeMsaDataFormat() {
break;
default:
UNREACHABLE();
- break;
}
}
return df;
@@ -4682,7 +4670,6 @@ void Simulator::DecodeTypeMsaELM() {
case SPLATI:
case INSVE:
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
}
@@ -6798,7 +6785,6 @@ void Simulator::DecodeTypeImmediate() {
}
default:
UNREACHABLE();
- break;
}
}
}
@@ -6856,7 +6842,6 @@ void Simulator::DecodeTypeImmediate() {
break;
default:
UNREACHABLE();
- break;
}
break;
default:
@@ -6880,14 +6865,16 @@ void Simulator::DecodeTypeImmediate() {
 // Type 3: instructions using a 26-bit immediate (e.g. j, jal).
void Simulator::DecodeTypeJump() {
- SimInstruction simInstr = instr_;
+ // instr_ will be overwritten by BranchDelayInstructionDecode(), so we save
+ // the result of IsLinkingInstruction now.
+ bool isLinkingInstr = instr_.IsLinkingInstruction();
// Get current pc.
int32_t current_pc = get_pc();
// Get unchanged bits of pc.
int32_t pc_high_bits = current_pc & 0xF0000000;
// Next pc.
- int32_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
+ int32_t next_pc = pc_high_bits | (instr_.Imm26Value() << 2);
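+  // e.g. a jal at pc 0x00401000 with a 26-bit immediate of 0x400 jumps to
+  // (0x00401000 & 0xF0000000) | (0x400 << 2) = 0x00001000; if it links, ra is
+  // set to 0x00401008, the instruction after the delay slot.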
// Execute branch delay slot.
// We don't check for end_sim_pc. First it should not be met as the current pc
@@ -6898,7 +6885,7 @@ void Simulator::DecodeTypeJump() {
// Update pc and ra if necessary.
// Do this after the branch delay execution.
- if (simInstr.IsLinkingInstruction()) {
+ if (isLinkingInstr) {
set_register(31, current_pc + 2 * kInstrSize);
}
set_pc(next_pc);
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.cc b/deps/v8/src/execution/mips64/simulator-mips64.cc
index d45889e5a2..f628653900 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.cc
+++ b/deps/v8/src/execution/mips64/simulator-mips64.cc
@@ -159,7 +159,6 @@ bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
} else {
return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
}
- return false;
}
bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
@@ -2039,7 +2038,6 @@ double Simulator::ReadD(int64_t addr, Instruction* instr) {
PrintF("Unaligned (double) read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
- return 0;
}
void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
@@ -2330,7 +2328,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
switch (redirection->type()) {
@@ -2365,7 +2362,6 @@ void Simulator::SoftwareInterrupt() {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim) {
switch (redirection->type()) {
@@ -2379,7 +2375,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -4404,7 +4399,6 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
default:
alu_out = 0x12345678;
UNREACHABLE();
- break;
}
break;
}
@@ -4503,7 +4497,6 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
default:
alu_out = 0x12345678;
UNREACHABLE();
- break;
}
break;
}
@@ -4542,7 +4535,6 @@ int Simulator::DecodeMsaDataFormat() {
break;
default:
UNREACHABLE();
- break;
}
} else {
int DF[] = {MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD};
@@ -4587,7 +4579,6 @@ int Simulator::DecodeMsaDataFormat() {
break;
default:
UNREACHABLE();
- break;
}
}
return df;
@@ -4967,7 +4958,6 @@ void Simulator::DecodeTypeMsaELM() {
case SPLATI:
case INSVE:
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
}
@@ -7187,7 +7177,6 @@ void Simulator::DecodeTypeImmediate() {
}
default:
UNREACHABLE();
- break;
}
break;
}
@@ -7273,7 +7262,6 @@ void Simulator::DecodeTypeImmediate() {
break;
default:
UNREACHABLE();
- break;
}
break;
default:
@@ -7297,13 +7285,15 @@ void Simulator::DecodeTypeImmediate() {
 // Type 3: instructions using a 26-bit immediate (e.g. j, jal).
void Simulator::DecodeTypeJump() {
- SimInstruction simInstr = instr_;
+ // instr_ will be overwritten by BranchDelayInstructionDecode(), so we save
+ // the result of IsLinkingInstruction now.
+ bool isLinkingInstr = instr_.IsLinkingInstruction();
// Get current pc.
int64_t current_pc = get_pc();
// Get unchanged bits of pc.
int64_t pc_high_bits = current_pc & 0xFFFFFFFFF0000000;
// Next pc.
- int64_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
+ int64_t next_pc = pc_high_bits | (instr_.Imm26Value() << 2);
// Execute branch delay slot.
// We don't check for end_sim_pc. First it should not be met as the current pc
@@ -7314,7 +7304,7 @@ void Simulator::DecodeTypeJump() {
// Update pc and ra if necessary.
// Do this after the branch delay execution.
- if (simInstr.IsLinkingInstruction()) {
+ if (isLinkingInstr) {
set_register(31, current_pc + 2 * kInstrSize);
}
set_pc(next_pc);
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.h b/deps/v8/src/execution/mips64/simulator-mips64.h
index ce3f06f2ed..69e8094174 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.h
+++ b/deps/v8/src/execution/mips64/simulator-mips64.h
@@ -243,7 +243,7 @@ class Simulator : public SimulatorBase {
void set_register(int reg, int64_t value);
void set_register_word(int reg, int32_t value);
void set_dw_register(int dreg, const int* dbl);
- int64_t get_register(int reg) const;
+ V8_EXPORT_PRIVATE int64_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
// Same for FPURegisters.
void set_fpu_register(int fpureg, int64_t value);
@@ -291,7 +291,7 @@ class Simulator : public SimulatorBase {
unsigned int get_msacsr_rounding_mode();
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int64_t value);
- int64_t get_pc() const;
+ V8_EXPORT_PRIVATE int64_t get_pc() const;
Address get_sp() const { return static_cast<Address>(get_register(sp)); }
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index 5e9751c07a..f6ee75e809 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -93,16 +93,12 @@ bool PPCDebugger::GetValue(const char* desc, intptr_t* value) {
if (regnum != kNoRegister) {
*value = GetRegisterValue(regnum);
return true;
- } else {
- if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc + 2, "%" V8PRIxPTR,
- reinterpret_cast<uintptr_t*>(value)) == 1;
- } else {
- return SScanF(desc, "%" V8PRIuPTR, reinterpret_cast<uintptr_t*>(value)) ==
- 1;
- }
}
- return false;
+ if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" V8PRIxPTR,
+ reinterpret_cast<uintptr_t*>(value)) == 1;
+ }
+ return SScanF(desc, "%" V8PRIuPTR, reinterpret_cast<uintptr_t*>(value)) == 1;
}
bool PPCDebugger::GetFPDoubleValue(const char* desc, double* value) {
@@ -1031,7 +1027,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
if (!stack_aligned) {
PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
@@ -1071,7 +1066,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
switch (redirection->type()) {
@@ -1085,7 +1079,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -1704,7 +1697,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
case CRORC:
case CROR: {
UNIMPLEMENTED(); // Not used by V8.
- break;
}
case RLWIMIX: {
int ra = instr->RAValue();
@@ -2552,7 +2544,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rs = instr->RSValue();
int ra = instr->RAValue();
uint32_t rs_val = static_cast<uint32_t>(get_register(rs));
- uintptr_t count = __builtin_ctz(rs_val);
+ uintptr_t count = rs_val == 0 ? 32 : __builtin_ctz(rs_val);
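+      // __builtin_ctz(0) is undefined, so a zero source must explicitly yield
+      // the operand width: an input of 0x00001000 gives 12, an input of 0
+      // gives 32.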
set_register(ra, count);
if (instr->Bit(0)) { // RC Bit set
int bf = 0;
@@ -2570,7 +2562,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rs = instr->RSValue();
int ra = instr->RAValue();
uint64_t rs_val = get_register(rs);
- uintptr_t count = __builtin_ctz(rs_val);
+ uintptr_t count = rs_val == 0 ? 64 : __builtin_ctzl(rs_val);
set_register(ra, count);
if (instr->Bit(0)) { // RC Bit set
int bf = 0;
@@ -3192,7 +3184,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
case LMW:
case STMW: {
UNIMPLEMENTED();
- break;
}
case LFSU:
@@ -3282,7 +3273,25 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
-
+ case BRW: {
+ constexpr int kBitsPerWord = 32;
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ uint64_t rs_val = get_register(rs);
+ uint32_t rs_high = rs_val >> kBitsPerWord;
+ uint32_t rs_low = (rs_val << kBitsPerWord) >> kBitsPerWord;
+ uint64_t result = __builtin_bswap32(rs_high);
+ result = (result << kBitsPerWord) | __builtin_bswap32(rs_low);
+ set_register(ra, result);
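+      // e.g. rs = 0x0102030405060708 -> ra = 0x0403020108070605; each 32-bit
+      // half is byte-reversed independently.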
+ break;
+ }
+ case BRD: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ uint64_t rs_val = get_register(rs);
+ set_register(ra, __builtin_bswap64(rs_val));
+ break;
+ }
case FCFIDS: {
// fcfids
int frt = instr->RTValue();
@@ -3512,7 +3521,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
if (frb_val < static_cast<double>(kMinVal)) {
frt_val = kMinVal;
@@ -3557,7 +3565,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
if (frb_val < static_cast<double>(kMinVal)) {
frt_val = kMinVal;
@@ -3609,7 +3616,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
if (frb_val < kMinVal) {
frt_val = kMinVal;
@@ -3653,7 +3659,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
if (frb_val < kMinVal) {
frt_val = kMinVal;
@@ -3746,7 +3751,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
return;
}
@@ -4728,6 +4732,36 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
+ case XSCVSPDPN: {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ uint64_t double_bits = get_d_register(b);
+ // Value is at the high 32 bits of the register.
+ float f =
+ bit_cast<float, uint32_t>(static_cast<uint32_t>(double_bits >> 32));
+ double_bits = bit_cast<uint64_t, double>(static_cast<double>(f));
+ // Preserve snan.
+ if (issignaling(f)) {
+ double_bits &= 0xFFF7FFFFFFFFFFFFU; // Clear bit 51.
+ }
+ set_d_register(t, double_bits);
+ break;
+ }
+ case XSCVDPSPN: {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ double b_val = get_double_from_d_register(b);
+ uint64_t float_bits = static_cast<uint64_t>(
+ bit_cast<uint32_t, float>(static_cast<float>(b_val)));
+ // Preserve snan.
+ if (issignaling(b_val)) {
+ float_bits &= 0xFFBFFFFFU; // Clear bit 22.
+ }
+      // The FP result is placed in both 32-bit halves of the destination.
+ float_bits = (float_bits << 32) | float_bits;
+ set_d_register(t, float_bits);
+ break;
+ }
#define VECTOR_UNPACK(S, D, if_high_side) \
int t = instr->RTValue(); \
int b = instr->RBValue(); \
@@ -5118,7 +5152,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
#undef GET_ADDRESS
default: {
UNIMPLEMENTED();
- break;
}
}
}
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.cc b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
index 3ec0c0e811..1b72aa9862 100644
--- a/deps/v8/src/execution/riscv64/simulator-riscv64.cc
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
@@ -60,6 +60,544 @@
#include "src/runtime/runtime-utils.h"
#include "src/utils/ostreams.h"
+// The following code about RVV was based on:
+// https://github.com/riscv/riscv-isa-sim
+// Copyright (c) 2010-2017, The Regents of the University of California
+// (Regents). All Rights Reserved.
+
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the Regents nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+
+// IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+// SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
+// ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
+// REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED
+// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
+// HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+// MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+#define RVV_VI_GENERAL_LOOP_BASE \
+ for (uint64_t i = rvv_vstart(); i < rvv_vl(); i++) {
+#define RVV_VI_LOOP_END \
+ set_rvv_vstart(0); \
+ }
+
+#define RVV_VI_MASK_VARS \
+ const uint8_t midx = i / 64; \
+ const uint8_t mpos = i % 64;
+
+#define RVV_VI_LOOP_MASK_SKIP(BODY) \
+ RVV_VI_MASK_VARS \
+ if (instr_.RvvVM() == 0) { \
+ bool skip = ((Rvvelt<uint64_t>(0, midx) >> mpos) & 0x1) == 0; \
+ if (skip) { \
+ continue; \
+ } \
+ }
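+
+// Element i's mask bit is bit (i % 64) of the 64-bit chunk (i / 64) of v0;
+// e.g. element 70 is governed by bit 6 of the second chunk (midx = 1,
+// mpos = 6).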
+
+#define RVV_VI_VV_LOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VV_PARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VV_PARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VV_PARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VV_PARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VV_PARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VV_ULOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VV_UPARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VV_UPARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VV_UPARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VV_UPARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VV_UPARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VX_LOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VX_PARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VX_PARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VX_PARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VX_PARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VX_PARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VX_ULOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VX_UPARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VX_UPARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VX_UPARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VX_UPARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VX_UPARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VI_LOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VI_PARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VI_PARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VI_PARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VI_PARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VI_PARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VI_ULOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VI_UPARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VI_UPARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VI_UPARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VI_UPARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VI_UPARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VVXI_MERGE_LOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ if (rvv_vsew() == E8) { \
+ VXI_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VXI_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VXI_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VXI_PARAMS(64); \
+ BODY; \
+ } else if (rvv_vsew() == E128) { \
+ VXI_PARAMS(128); \
+ BODY \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define VV_WITH_CARRY_PARAMS(x) \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i); \
+ type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_sew_t<x>::type& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true);
+
+#define XI_WITH_CARRY_PARAMS(x) \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i); \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)instr_.RvvSimm5(); \
+ type_sew_t<x>::type& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true);
+
+// carry/borrow bit loop
+#define RVV_VI_VV_LOOP_WITH_CARRY(BODY) \
+ CHECK_NE(rvv_vd_reg(), 0); \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_MASK_VARS \
+ if (rvv_vsew() == E8) { \
+ VV_WITH_CARRY_PARAMS(8) \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VV_WITH_CARRY_PARAMS(16) \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VV_WITH_CARRY_PARAMS(32) \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VV_WITH_CARRY_PARAMS(64) \
+ BODY; \
+ } \
+ RVV_VI_LOOP_END
+
+#define RVV_VI_XI_LOOP_WITH_CARRY(BODY) \
+ CHECK_NE(rvv_vd_reg(), 0); \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_MASK_VARS \
+ if (rvv_vsew() == E8) { \
+ XI_WITH_CARRY_PARAMS(8) \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ XI_WITH_CARRY_PARAMS(16) \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ XI_WITH_CARRY_PARAMS(32) \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ XI_WITH_CARRY_PARAMS(64) \
+ BODY; \
+ } \
+ RVV_VI_LOOP_END
+
+#define VV_CMP_PARAMS(x) \
+ type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VX_CMP_PARAMS(x) \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VI_CMP_PARAMS(x) \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)instr_.RvvSimm5(); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VV_UCMP_PARAMS(x) \
+ type_usew_t<x>::type vs1 = Rvvelt<type_usew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VX_UCMP_PARAMS(x) \
+ type_usew_t<x>::type rs1 = \
+ (type_sew_t<x>::type)(get_register(rvv_vs1_reg())); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VI_UCMP_PARAMS(x) \
+ type_usew_t<x>::type uimm5 = (type_usew_t<x>::type)instr_.RvvUimm5(); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define RVV_VI_LOOP_CMP_BASE \
+ CHECK(rvv_vsew() >= E8 && rvv_vsew() <= E64); \
+ for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ uint64_t mmask = uint64_t(1) << mpos; \
+ uint64_t& vdi = Rvvelt<uint64_t>(rvv_vd_reg(), midx, true); \
+ uint64_t res = 0;
+
+#define RVV_VI_LOOP_CMP_END \
+ vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \
+ } \
+ set_rvv_vstart(0);
+
+// comparison result to masking register
+#define RVV_VI_VV_LOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VV_CMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VV_CMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VV_CMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VV_CMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VX_LOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VX_CMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VX_CMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VX_CMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VX_CMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+#define RVV_VI_VI_LOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VI_CMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VI_CMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VI_CMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VI_CMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+#define RVV_VI_VV_ULOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VV_UCMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VV_UCMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VV_UCMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VV_UCMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+#define RVV_VI_VX_ULOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VX_UCMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VX_UCMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VX_UCMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VX_UCMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+#define RVV_VI_VI_ULOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VI_UCMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VI_UCMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VI_UCMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VI_UCMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+// reduction loop - signed
+#define RVV_VI_LOOP_REDUCTION_BASE(x) \
+ auto& vd_0_des = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), 0, true); \
+ auto vd_0_res = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), 0); \
+ for (uint64_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ auto vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define RVV_VI_LOOP_REDUCTION_END(x) \
+ } \
+ if (rvv_vl() > 0) { \
+ vd_0_des = vd_0_res; \
+ } \
+ set_rvv_vstart(0);
+
+#define REDUCTION_LOOP(x, BODY) \
+ RVV_VI_LOOP_REDUCTION_BASE(x) \
+ BODY; \
+ RVV_VI_LOOP_REDUCTION_END(x)
+
+#define RVV_VI_VV_LOOP_REDUCTION(BODY) \
+ if (rvv_vsew() == E8) { \
+ REDUCTION_LOOP(8, BODY) \
+ } else if (rvv_vsew() == E16) { \
+ REDUCTION_LOOP(16, BODY) \
+ } else if (rvv_vsew() == E32) { \
+ REDUCTION_LOOP(32, BODY) \
+ } else if (rvv_vsew() == E64) { \
+ REDUCTION_LOOP(64, BODY) \
+ } \
+ rvv_trace_vd();
+
+// reduction loop - unsigned
+#define RVV_VI_ULOOP_REDUCTION_BASE(x) \
+ auto& vd_0_des = Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), 0, true); \
+ auto vd_0_res = Rvvelt<type_usew_t<x>::type>(rvv_vs1_reg(), 0); \
+ for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ auto vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define REDUCTION_ULOOP(x, BODY) \
+ RVV_VI_ULOOP_REDUCTION_BASE(x) \
+ BODY; \
+ RVV_VI_LOOP_REDUCTION_END(x)
+
+#define RVV_VI_VV_ULOOP_REDUCTION(BODY) \
+ if (rvv_vsew() == E8) { \
+ REDUCTION_ULOOP(8, BODY) \
+ } else if (rvv_vsew() == E16) { \
+ REDUCTION_ULOOP(16, BODY) \
+ } else if (rvv_vsew() == E32) { \
+ REDUCTION_ULOOP(32, BODY) \
+ } else if (rvv_vsew() == E64) { \
+ REDUCTION_ULOOP(64, BODY) \
+ } \
+ rvv_trace_vd();
+
+#define VI_STRIP(inx) reg_t vreg_inx = inx;
+
+#define VI_ELEMENT_SKIP(inx) \
+ if (inx >= vl) { \
+ continue; \
+ } else if (inx < rvv_vstart()) { \
+ continue; \
+ } else { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ }
+
+#define require_vm \
+ do { \
+ if (instr_.RvvVM() == 0) CHECK_NE(rvv_vd_reg(), 0); \
+ } while (0);
+
+#define VI_CHECK_STORE(elt_width, is_mask_ldst) \
+ reg_t veew = is_mask_ldst ? 1 : sizeof(elt_width##_t) * 8;
+// float vemul = is_mask_ldst ? 1 : ((float)veew / rvv_vsew() * P.VU.vflmul);
+// reg_t emul = vemul < 1 ? 1 : vemul;
+// require(vemul >= 0.125 && vemul <= 8);
+// require_align(rvv_rd(), vemul);
+// require((nf * emul) <= (NVPR / 4) && (rvv_rd() + nf * emul) <= NVPR);
+
+#define VI_CHECK_LOAD(elt_width, is_mask_ldst) \
+ VI_CHECK_STORE(elt_width, is_mask_ldst); \
+ require_vm;
+
+/*vd + fn * emul*/
+#define RVV_VI_LD(stride, offset, elt_width, is_mask_ldst) \
+ const reg_t nf = rvv_nf() + 1; \
+ const reg_t vl = is_mask_ldst ? ((rvv_vl() + 7) / 8) : rvv_vl(); \
+ const int64_t baseAddr = rs1(); \
+ for (reg_t i = 0; i < vl; ++i) { \
+ VI_ELEMENT_SKIP(i); \
+ VI_STRIP(i); \
+ set_rvv_vstart(i); \
+ for (reg_t fn = 0; fn < nf; ++fn) { \
+ auto val = ReadMem<elt_width##_t>( \
+ baseAddr + (stride) + (offset) * sizeof(elt_width##_t), \
+ instr_.instr()); \
+ type_sew_t<sizeof(elt_width##_t)* 8>::type& vd = \
+ Rvvelt<type_sew_t<sizeof(elt_width##_t) * 8>::type>(rvv_vd_reg(), \
+ vreg_inx, true); \
+ vd = val; \
+ } \
+ } \
+ set_rvv_vstart(0); \
+ if (::v8::internal::FLAG_trace_sim) { \
+ __int128_t value = Vregister_[rvv_vd_reg()]; \
+ SNPrintF(trace_buf_, "0x%016" PRIx64 "%016" PRIx64 " <-- 0x%016" PRIx64, \
+ *(reinterpret_cast<int64_t*>(&value) + 1), \
+ *reinterpret_cast<int64_t*>(&value), \
+ (uint64_t)(get_register(rs1_reg()))); \
+ }
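+
+// For the unit-stride byte load used later in DecodeRvvVL
+// (RVV_VI_LD(0, (i * nf + fn), int8, false)) with nf == 1, this copies
+// rvv_vl() consecutive bytes starting at the address in rs1 into elements
+// 0..vl-1 of vd, skipping any elements masked off by v0.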
+
+#define RVV_VI_ST(stride, offset, elt_width, is_mask_ldst) \
+ const reg_t nf = rvv_nf() + 1; \
+ const reg_t vl = is_mask_ldst ? ((rvv_vl() + 7) / 8) : rvv_vl(); \
+ const int64_t baseAddr = rs1(); \
+ for (reg_t i = 0; i < vl; ++i) { \
+ VI_STRIP(i) \
+ VI_ELEMENT_SKIP(i); \
+ set_rvv_vstart(i); \
+ for (reg_t fn = 0; fn < nf; ++fn) { \
+ elt_width##_t vs1 = Rvvelt<type_sew_t<sizeof(elt_width##_t) * 8>::type>( \
+ rvv_vs3_reg(), vreg_inx); \
+ WriteMem(baseAddr + (stride) + (offset) * sizeof(elt_width##_t), vs1, \
+ instr_.instr()); \
+ } \
+ } \
+ set_rvv_vstart(0); \
+ if (::v8::internal::FLAG_trace_sim) { \
+ __int128_t value = Vregister_[rvv_vd_reg()]; \
+ SNPrintF(trace_buf_, "0x%016" PRIx64 "%016" PRIx64 " --> 0x%016" PRIx64, \
+ *(reinterpret_cast<int64_t*>(&value) + 1), \
+ *reinterpret_cast<int64_t*>(&value), \
+ (uint64_t)(get_register(rs1_reg()))); \
+ }
namespace v8 {
namespace internal {
@@ -116,13 +654,14 @@ class RiscvDebugger {
int64_t GetFPURegisterValue(int regnum);
float GetFPURegisterValueFloat(int regnum);
double GetFPURegisterValueDouble(int regnum);
+ __int128_t GetVRegisterValue(int regnum);
bool GetValue(const char* desc, int64_t* value);
};
-inline void UNSUPPORTED() {
- printf("Sim: Unsupported instruction.\n");
+#define UNSUPPORTED() \
+ printf("Sim: Unsupported instruction. Func:%s Line:%d\n", __FUNCTION__, \
+ __LINE__); \
base::OS::Abort();
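+// (Being a macro, the message above reports the caller's __FUNCTION__ and
+// __LINE__ rather than those of a shared helper function.)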
-}
int64_t RiscvDebugger::GetRegisterValue(int regnum) {
if (regnum == kNumSimuRegisters) {
@@ -156,6 +695,14 @@ double RiscvDebugger::GetFPURegisterValueDouble(int regnum) {
}
}
+__int128_t RiscvDebugger::GetVRegisterValue(int regnum) {
+ if (regnum == kNumVRegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_vregister(regnum);
+ }
+}
+
bool RiscvDebugger::GetValue(const char* desc, int64_t* value) {
int regnum = Registers::Number(desc);
int fpuregnum = FPURegisters::Number(desc);
@@ -172,7 +719,6 @@ bool RiscvDebugger::GetValue(const char* desc, int64_t* value) {
} else {
return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
}
- return false;
}
#define REG_INFO(name) \
@@ -315,6 +861,7 @@ void RiscvDebugger::Debug() {
} else {
int regnum = Registers::Number(arg1);
int fpuregnum = FPURegisters::Number(arg1);
+ int vregnum = VRegisters::Number(arg1);
if (regnum != kInvalidRegister) {
value = GetRegisterValue(regnum);
@@ -325,6 +872,11 @@ void RiscvDebugger::Debug() {
dvalue = GetFPURegisterValueDouble(fpuregnum);
PrintF("%3s: 0x%016" PRIx64 " %16.4e\n",
FPURegisters::Name(fpuregnum), value, dvalue);
+ } else if (vregnum != kInvalidVRegister) {
+ __int128_t v = GetVRegisterValue(vregnum);
+ PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
+ VRegisters::Name(vregnum), (uint64_t)(v >> 64),
+ (uint64_t)v);
} else {
PrintF("%s unrecognized\n", arg1);
}
@@ -960,6 +1512,11 @@ double Simulator::get_fpu_register_double(int fpureg) const {
return *bit_cast<double*>(&FPUregisters_[fpureg]);
}
+__int128_t Simulator::get_vregister(int vreg) const {
+ DCHECK((vreg >= 0) && (vreg < kNumVRegisters));
+ return Vregister_[vreg];
+}
+
// Runtime FP routines take up to two double arguments and zero
// or one integer arguments. All are constructed here,
// from fa0, fa1, and a0.
@@ -1301,6 +1858,9 @@ void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) {
#endif
T* ptr = reinterpret_cast<T*>(addr);
TraceMemWr(addr, value);
+ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" PRId64 "\n",
+ // (int64_t)ptr,
+ // (int64_t)value);
*ptr = value;
}
@@ -1424,7 +1984,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
switch (redirection->type()) {
@@ -1459,7 +2018,6 @@ void Simulator::SoftwareInterrupt() {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim) {
switch (redirection->type()) {
@@ -1473,7 +2031,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -2376,7 +2933,8 @@ void Simulator::DecodeRVRFPType() {
break;
}
case 0b00001: { // RO_FCVT_WU_S
- set_rd(RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode()));
+ set_rd(sext32(
+ RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode())));
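+        // Per RV64, the 32-bit result of FCVT.WU is written sign-extended
+        // into the 64-bit destination register, hence the sext32.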
break;
}
#ifdef V8_TARGET_ARCH_64_BIT
@@ -2416,7 +2974,6 @@ void Simulator::DecodeRVRFPType() {
}
break;
}
- // TODO(RISCV): Implement handling of NaN (quiet and signalling).
case RO_FLE_S: { // RO_FEQ_S RO_FLT_S RO_FLE_S
switch (instr_.Funct3Value()) {
case 0b010: { // RO_FEQ_S
@@ -2624,7 +3181,6 @@ void Simulator::DecodeRVRFPType() {
case (RO_FCLASS_D & kRFPTypeMask): { // RO_FCLASS_D , 64D RO_FMV_X_D
if (instr_.Rs2Value() != 0b00000) {
UNSUPPORTED();
- break;
}
switch (instr_.Funct3Value()) {
case 0b001: { // RO_FCLASS_D
@@ -2651,7 +3207,8 @@ void Simulator::DecodeRVRFPType() {
break;
}
case 0b00001: { // RO_FCVT_WU_D
- set_rd(RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode()));
+ set_rd(sext32(
+ RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode())));
break;
}
#ifdef V8_TARGET_ARCH_64_BIT
@@ -2826,6 +3383,117 @@ void Simulator::DecodeRVR4Type() {
}
}
+bool Simulator::DecodeRvvVL() {
+ uint32_t instr_temp =
+ instr_.InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
+ if (RO_V_VL == instr_temp) {
+ if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
+ switch (instr_.vl_vs_width()) {
+ case 8: {
+ RVV_VI_LD(0, (i * nf + fn), int8, false);
+ break;
+ }
+ case 16: {
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ return true;
+ } else {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ }
+ } else if (RO_V_VLS == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VLX == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VLSEG2 == instr_temp || RO_V_VLSEG3 == instr_temp ||
+ RO_V_VLSEG4 == instr_temp || RO_V_VLSEG5 == instr_temp ||
+ RO_V_VLSEG6 == instr_temp || RO_V_VLSEG7 == instr_temp ||
+ RO_V_VLSEG8 == instr_temp) {
+ if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ }
+ } else if (RO_V_VLSSEG2 == instr_temp || RO_V_VLSSEG3 == instr_temp ||
+ RO_V_VLSSEG4 == instr_temp || RO_V_VLSSEG5 == instr_temp ||
+ RO_V_VLSSEG6 == instr_temp || RO_V_VLSSEG7 == instr_temp ||
+ RO_V_VLSSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VLXSEG2 == instr_temp || RO_V_VLXSEG3 == instr_temp ||
+ RO_V_VLXSEG4 == instr_temp || RO_V_VLXSEG5 == instr_temp ||
+ RO_V_VLXSEG6 == instr_temp || RO_V_VLXSEG7 == instr_temp ||
+ RO_V_VLXSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool Simulator::DecodeRvvVS() {
+ uint32_t instr_temp =
+ instr_.InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
+ if (RO_V_VS == instr_temp) {
+ if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
+ switch (instr_.vl_vs_width()) {
+ case 8: {
+ RVV_VI_ST(0, (i * nf + fn), uint8, false);
+ break;
+ }
+ case 16: {
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ } else {
+ UNIMPLEMENTED_RISCV();
+ }
+ return true;
+ } else if (RO_V_VSS == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSX == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSU == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSSEG2 == instr_temp || RO_V_VSSEG3 == instr_temp ||
+ RO_V_VSSEG4 == instr_temp || RO_V_VSSEG5 == instr_temp ||
+ RO_V_VSSEG6 == instr_temp || RO_V_VSSEG7 == instr_temp ||
+ RO_V_VSSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSSSEG2 == instr_temp || RO_V_VSSSEG3 == instr_temp ||
+ RO_V_VSSSEG4 == instr_temp || RO_V_VSSSEG5 == instr_temp ||
+ RO_V_VSSSEG6 == instr_temp || RO_V_VSSSEG7 == instr_temp ||
+ RO_V_VSSSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSXSEG2 == instr_temp || RO_V_VSXSEG3 == instr_temp ||
+ RO_V_VSXSEG4 == instr_temp || RO_V_VSXSEG5 == instr_temp ||
+ RO_V_VSXSEG6 == instr_temp || RO_V_VSXSEG7 == instr_temp ||
+ RO_V_VSXSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else {
+ return false;
+ }
+}
+
Builtin Simulator::LookUp(Address pc) {
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
@@ -3061,8 +3729,12 @@ void Simulator::DecodeRVIType() {
TraceMemRd(addr, val, get_fpu_register(frd_reg()));
break;
}
- default:
- UNSUPPORTED();
+ default: {
+ if (!DecodeRvvVL()) {
+ UNSUPPORTED();
+ }
+ break;
+ }
}
}
@@ -3095,7 +3767,10 @@ void Simulator::DecodeRVSType() {
break;
}
default:
- UNSUPPORTED();
+ if (!DecodeRvvVS()) {
+ UNSUPPORTED();
+ }
+ break;
}
}
@@ -3403,6 +4078,794 @@ void Simulator::DecodeCBType() {
}
}
+/**
+ * RISCV-ISA-SIM
+ *
+ * @link https://github.com/riscv/riscv-isa-sim/
+ * @copyright Copyright (c) The Regents of the University of California
+ * @license https://github.com/riscv/riscv-isa-sim/blob/master/LICENSE
+ */
+// ref: https://locklessinc.com/articles/sat_arithmetic/
+template <typename T, typename UT>
+static inline T sat_add(T x, T y, bool& sat) {
+ UT ux = x;
+ UT uy = y;
+ UT res = ux + uy;
+ sat = false;
+ int sh = sizeof(T) * 8 - 1;
+
+ /* Calculate overflowed result. (Don't change the sign bit of ux) */
+ ux = (ux >> sh) + (((UT)0x1 << sh) - 1);
+
+ /* Force compiler to use cmovns instruction */
+ if ((T)((ux ^ uy) | ~(uy ^ res)) >= 0) {
+ res = ux;
+ sat = true;
+ }
+
+ return res;
+}
+
+template <typename T, typename UT>
+static inline T sat_sub(T x, T y, bool& sat) {
+ UT ux = x;
+ UT uy = y;
+ UT res = ux - uy;
+ sat = false;
+ int sh = sizeof(T) * 8 - 1;
+
+ /* Calculate overflowed result. (Don't change the sign bit of ux) */
+ ux = (ux >> sh) + (((UT)0x1 << sh) - 1);
+
+ /* Force compiler to use cmovns instruction */
+ if ((T)((ux ^ uy) & (ux ^ res)) < 0) {
+ res = ux;
+ sat = true;
+ }
+
+ return res;
+}
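+
+// Worked 8-bit examples: sat_add<int8_t, uint8_t>(120, 20, sat) returns 127
+// with sat == true (the true sum 140 overflows), and
+// sat_add<int8_t, uint8_t>(-100, -50, sat) returns -128; operands that do not
+// overflow come back unchanged with sat == false.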
+
+void Simulator::DecodeRvvIVV() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVV);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VV: {
+ RVV_VI_VV_LOOP({ vd = vs1 + vs2; });
+ break;
+ }
+ case RO_V_VSADD_VV: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VV_PARAMS(8);
+ vd = sat_add<int8_t, uint8_t>(vs2, vs1, sat);
+ break;
+ }
+ case E16: {
+ VV_PARAMS(16);
+ vd = sat_add<int16_t, uint16_t>(vs2, vs1, sat);
+ break;
+ }
+ case E32: {
+ VV_PARAMS(32);
+ vd = sat_add<int32_t, uint32_t>(vs2, vs1, sat);
+ break;
+ }
+ default: {
+ VV_PARAMS(64);
+ vd = sat_add<int64_t, uint64_t>(vs2, vs1, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+ case RO_V_VSUB_VV: {
+ RVV_VI_VV_LOOP({ vd = vs2 - vs1; })
+ break;
+ }
+ case RO_V_VSSUB_VV: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VV_PARAMS(8);
+ vd = sat_sub<int8_t, uint8_t>(vs2, vs1, sat);
+ break;
+ }
+ case E16: {
+ VV_PARAMS(16);
+ vd = sat_sub<int16_t, uint16_t>(vs2, vs1, sat);
+ break;
+ }
+ case E32: {
+ VV_PARAMS(32);
+ vd = sat_sub<int32_t, uint32_t>(vs2, vs1, sat);
+ break;
+ }
+ default: {
+ VV_PARAMS(64);
+ vd = sat_sub<int64_t, uint64_t>(vs2, vs1, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+ case RO_V_VAND_VV: {
+ RVV_VI_VV_LOOP({ vd = vs1 & vs2; })
+ break;
+ }
+ case RO_V_VOR_VV: {
+ RVV_VI_VV_LOOP({ vd = vs1 | vs2; })
+ break;
+ }
+ case RO_V_VXOR_VV: {
+ RVV_VI_VV_LOOP({ vd = vs1 ^ vs2; })
+ break;
+ }
+ case RO_V_VMAXU_VV: {
+ RVV_VI_VV_ULOOP({
+ if (vs1 <= vs2) {
+ vd = vs2;
+ } else {
+ vd = vs1;
+ }
+ })
+ break;
+ }
+ case RO_V_VMAX_VV: {
+ RVV_VI_VV_LOOP({
+ if (vs1 <= vs2) {
+ vd = vs2;
+ } else {
+ vd = vs1;
+ }
+ })
+ break;
+ }
+ case RO_V_VMINU_VV: {
+ RVV_VI_VV_ULOOP({
+ if (vs1 <= vs2) {
+ vd = vs1;
+ } else {
+ vd = vs2;
+ }
+ })
+ break;
+ }
+ case RO_V_VMIN_VV: {
+ RVV_VI_VV_LOOP({
+ if (vs1 <= vs2) {
+ vd = vs1;
+ } else {
+ vd = vs2;
+ }
+ })
+ break;
+ }
+ case RO_V_VMV_VV: {
+ if (instr_.RvvVM()) {
+ RVV_VI_VVXI_MERGE_LOOP({
+ vd = vs1;
+ USE(simm5);
+ USE(vs2);
+ USE(rs1);
+ });
+ } else {
+ RVV_VI_VVXI_MERGE_LOOP({
+ bool use_first = (Rvvelt<uint64_t>(0, (i / 64)) >> (i % 64)) & 0x1;
+ vd = use_first ? vs1 : vs2;
+ USE(simm5);
+ USE(rs1);
+ });
+ }
+ break;
+ }
+ case RO_V_VMSEQ_VV: {
+ RVV_VI_VV_LOOP_CMP({ res = vs1 == vs2; })
+ break;
+ }
+ case RO_V_VMSNE_VV: {
+ RVV_VI_VV_LOOP_CMP({ res = vs1 != vs2; })
+ break;
+ }
+ case RO_V_VMSLTU_VV: {
+ RVV_VI_VV_ULOOP_CMP({ res = vs2 < vs1; })
+ break;
+ }
+ case RO_V_VMSLT_VV: {
+ RVV_VI_VV_LOOP_CMP({ res = vs2 < vs1; })
+ break;
+ }
+ case RO_V_VMSLE_VV: {
+ RVV_VI_VV_LOOP_CMP({ res = vs2 <= vs1; })
+ break;
+ }
+ case RO_V_VMSLEU_VV: {
+ RVV_VI_VV_ULOOP_CMP({ res = vs2 <= vs1; })
+ break;
+ }
+ case RO_V_VADC_VV:
+ if (instr_.RvvVM()) {
+ RVV_VI_VV_LOOP_WITH_CARRY({
+ auto& v0 = Rvvelt<uint64_t>(0, midx);
+          vd = vs1 + vs2 + ((v0 >> mpos) & 0x1);
+ })
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VSLL_VV: {
+ RVV_VI_VV_LOOP({ vd = vs2 << vs1; })
+ break;
+ }
+ case RO_V_VRGATHER_VV: {
+ RVV_VI_GENERAL_LOOP_BASE
+ switch (rvv_vsew()) {
+ case E8: {
+ auto vs1 = Rvvelt<uint8_t>(rvv_vs1_reg(), i);
+ // if (i > 255) continue;
+ Rvvelt<uint8_t>(rvv_vd_reg(), i, true) =
+ vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint8_t>(rvv_vs2_reg(), vs1);
+ break;
+ }
+ case E16: {
+ auto vs1 = Rvvelt<uint16_t>(rvv_vs1_reg(), i);
+ Rvvelt<uint16_t>(rvv_vd_reg(), i, true) =
+ vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint16_t>(rvv_vs2_reg(), vs1);
+ break;
+ }
+ case E32: {
+ auto vs1 = Rvvelt<uint32_t>(rvv_vs1_reg(), i);
+ Rvvelt<uint32_t>(rvv_vd_reg(), i, true) =
+ vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint32_t>(rvv_vs2_reg(), vs1);
+ break;
+ }
+ default: {
+ auto vs1 = Rvvelt<uint64_t>(rvv_vs1_reg(), i);
+ Rvvelt<uint64_t>(rvv_vd_reg(), i, true) =
+ vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint64_t>(rvv_vs2_reg(), vs1);
+ break;
+ }
+ }
+ RVV_VI_LOOP_END;
+ break;
+ }
+ default:
+ // v8::base::EmbeddedVector<char, 256> buffer;
+ // SNPrintF(trace_buf_, " ");
+ // disasm::NameConverter converter;
+ // disasm::Disassembler dasm(converter);
+ // // Use a reasonably large buffer.
+ // dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(&instr_));
+
+ // PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+ // reinterpret_cast<intptr_t>(&instr_), buffer.begin());
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ set_rvv_vstart(0);
+}
+
+void Simulator::DecodeRvvIVI() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVI);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VI: {
+ RVV_VI_VI_LOOP({ vd = simm5 + vs2; })
+ break;
+ }
+ case RO_V_VSADD_VI: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VI_PARAMS(8);
+ vd = sat_add<int8_t, uint8_t>(vs2, simm5, sat);
+ break;
+ }
+ case E16: {
+ VI_PARAMS(16);
+ vd = sat_add<int16_t, uint16_t>(vs2, simm5, sat);
+ break;
+ }
+ case E32: {
+ VI_PARAMS(32);
+ vd = sat_add<int32_t, uint32_t>(vs2, simm5, sat);
+ break;
+ }
+ default: {
+ VI_PARAMS(64);
+ vd = sat_add<int64_t, uint64_t>(vs2, simm5, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+ case RO_V_VRSUB_VI: {
+ RVV_VI_VI_LOOP({ vd = vs2 - simm5; })
+ break;
+ }
+ case RO_V_VAND_VI: {
+ RVV_VI_VI_LOOP({ vd = simm5 & vs2; })
+ break;
+ }
+ case RO_V_VOR_VI: {
+ RVV_VI_VI_LOOP({ vd = simm5 | vs2; })
+ break;
+ }
+ case RO_V_VXOR_VI: {
+ RVV_VI_VI_LOOP({ vd = simm5 ^ vs2; })
+ break;
+ }
+ case RO_V_VMV_VI:
+ if (instr_.RvvVM()) {
+ RVV_VI_VVXI_MERGE_LOOP({
+ vd = simm5;
+ USE(vs1);
+ USE(vs2);
+ USE(rs1);
+ });
+ } else {
+ RVV_VI_VVXI_MERGE_LOOP({
+ bool use_first = (Rvvelt<uint64_t>(0, (i / 64)) >> (i % 64)) & 0x1;
+ vd = use_first ? simm5 : vs2;
+ USE(vs1);
+ USE(rs1);
+ });
+ }
+ break;
+ case RO_V_VMSEQ_VI:
+ RVV_VI_VI_LOOP_CMP({ res = simm5 == vs2; })
+ break;
+ case RO_V_VMSNE_VI:
+ RVV_VI_VI_LOOP_CMP({ res = simm5 != vs2; })
+ break;
+ case RO_V_VMSLEU_VI:
+ RVV_VI_VI_ULOOP_CMP({ res = vs2 <= uimm5; })
+ break;
+ case RO_V_VMSLE_VI:
+ RVV_VI_VI_LOOP_CMP({ res = vs2 <= simm5; })
+ break;
+ case RO_V_VMSGT_VI:
+ RVV_VI_VI_LOOP_CMP({ res = vs2 > simm5; })
+ break;
+ case RO_V_VSLIDEDOWN_VI: {
+ const uint8_t sh = instr_.RvvUimm5();
+ RVV_VI_GENERAL_LOOP_BASE
+
+ reg_t offset = 0;
+ bool is_valid = (i + sh) < rvv_vlmax();
+
+ if (is_valid) {
+ offset = sh;
+ }
+
+ switch (rvv_sew()) {
+ case E8: {
+ VI_XI_SLIDEDOWN_PARAMS(8, offset);
+ vd = is_valid ? vs2 : 0;
+ } break;
+ case E16: {
+ VI_XI_SLIDEDOWN_PARAMS(16, offset);
+ vd = is_valid ? vs2 : 0;
+ } break;
+ case E32: {
+ VI_XI_SLIDEDOWN_PARAMS(32, offset);
+ vd = is_valid ? vs2 : 0;
+ } break;
+ default: {
+ VI_XI_SLIDEDOWN_PARAMS(64, offset);
+ vd = is_valid ? vs2 : 0;
+ } break;
+ }
+ RVV_VI_LOOP_END
+ } break;
+ case RO_V_VSRL_VI:
+ RVV_VI_VI_LOOP({ vd = vs2 >> simm5; })
+ break;
+ case RO_V_VSLL_VI:
+ RVV_VI_VI_LOOP({ vd = vs2 << simm5; })
+ break;
+ case RO_V_VADC_VI:
+ if (instr_.RvvVM()) {
+ RVV_VI_XI_LOOP_WITH_CARRY({
+ auto& v0 = Rvvelt<uint64_t>(0, midx);
+          vd = simm5 + vs2 + ((v0 >> mpos) & 0x1);
+ USE(rs1);
+ })
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+}
+
+void Simulator::DecodeRvvIVX() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVX);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 + vs2; })
+ break;
+ }
+ case RO_V_VSADD_VX: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VX_PARAMS(8);
+ vd = sat_add<int8_t, uint8_t>(vs2, rs1, sat);
+ break;
+ }
+ case E16: {
+ VX_PARAMS(16);
+ vd = sat_add<int16_t, uint16_t>(vs2, rs1, sat);
+ break;
+ }
+ case E32: {
+ VX_PARAMS(32);
+ vd = sat_add<int32_t, uint32_t>(vs2, rs1, sat);
+ break;
+ }
+ default: {
+ VX_PARAMS(64);
+ vd = sat_add<int64_t, uint64_t>(vs2, rs1, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+ case RO_V_VSUB_VX: {
+ RVV_VI_VX_LOOP({ vd = vs2 - rs1; })
+ break;
+ }
+ case RO_V_VSSUB_VX: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VX_PARAMS(8);
+ vd = sat_sub<int8_t, uint8_t>(vs2, rs1, sat);
+ break;
+ }
+ case E16: {
+ VX_PARAMS(16);
+ vd = sat_sub<int16_t, uint16_t>(vs2, rs1, sat);
+ break;
+ }
+ case E32: {
+ VX_PARAMS(32);
+ vd = sat_sub<int32_t, uint32_t>(vs2, rs1, sat);
+ break;
+ }
+ default: {
+ VX_PARAMS(64);
+ vd = sat_sub<int64_t, uint64_t>(vs2, rs1, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+ case RO_V_VRSUB_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 - vs2; })
+ break;
+ }
+ case RO_V_VAND_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 & vs2; })
+ break;
+ }
+ case RO_V_VOR_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 | vs2; })
+ break;
+ }
+ case RO_V_VXOR_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 ^ vs2; })
+ break;
+ }
+ case RO_V_VMAX_VX: {
+ RVV_VI_VX_LOOP({
+ if (rs1 <= vs2) {
+ vd = vs2;
+ } else {
+ vd = rs1;
+ }
+ })
+ break;
+ }
+ case RO_V_VMAXU_VX: {
+ RVV_VI_VX_ULOOP({
+ if (rs1 <= vs2) {
+ vd = vs2;
+ } else {
+ vd = rs1;
+ }
+ })
+ break;
+ }
+ case RO_V_VMINU_VX: {
+ RVV_VI_VX_ULOOP({
+ if (rs1 <= vs2) {
+ vd = rs1;
+ } else {
+ vd = vs2;
+ }
+ })
+ break;
+ }
+ case RO_V_VMIN_VX: {
+ RVV_VI_VX_LOOP({
+ if (rs1 <= vs2) {
+ vd = rs1;
+ } else {
+ vd = vs2;
+ }
+ })
+ break;
+ }
+ case RO_V_VMV_VX:
+ if (instr_.RvvVM()) {
+ RVV_VI_VVXI_MERGE_LOOP({
+ vd = rs1;
+ USE(vs1);
+ USE(vs2);
+ USE(simm5);
+ });
+ } else {
+ RVV_VI_VVXI_MERGE_LOOP({
+ bool use_first = (Rvvelt<uint64_t>(0, (i / 64)) >> (i % 64)) & 0x1;
+ vd = use_first ? rs1 : vs2;
+ USE(vs1);
+ USE(simm5);
+ });
+ }
+ break;
+ case RO_V_VMSEQ_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 == rs1; })
+ break;
+ case RO_V_VMSNE_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 != rs1; })
+ break;
+ case RO_V_VMSLT_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 < rs1; })
+ break;
+ case RO_V_VMSLTU_VX:
+ RVV_VI_VX_ULOOP_CMP({ res = vs2 < rs1; })
+ break;
+ case RO_V_VMSLE_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 <= rs1; })
+ break;
+ case RO_V_VMSLEU_VX:
+ RVV_VI_VX_ULOOP_CMP({ res = vs2 <= rs1; })
+ break;
+ case RO_V_VMSGT_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 > rs1; })
+ break;
+ case RO_V_VMSGTU_VX:
+ RVV_VI_VX_ULOOP_CMP({ res = vs2 > rs1; })
+ break;
+ case RO_V_VSLIDEDOWN_VX:
+ UNIMPLEMENTED_RISCV();
+ break;
+ case RO_V_VADC_VX:
+ if (instr_.RvvVM()) {
+ RVV_VI_XI_LOOP_WITH_CARRY({
+ auto& v0 = Rvvelt<uint64_t>(0, midx);
+          vd = rs1 + vs2 + ((v0 >> mpos) & 0x1);
+ USE(simm5);
+ })
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VSLL_VX: {
+ RVV_VI_VX_LOOP({ vd = vs2 << rs1; })
+ break;
+ }
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+}
+
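The vsadd.vx/vssub.vx cases above delegate to the in-tree sat_add/sat_sub helpers, which are not part of this hunk. A minimal sketch of the E8 saturating add, only to illustrate the clamping and the vxsat side effect (the name SatAdd8Sketch is illustrative, not the V8 helper):

#include <cstdint>
#include <limits>

// Signed 8-bit saturating add: clamp to [-128, 127] and report saturation.
int8_t SatAdd8Sketch(int8_t vs2, int8_t rs1, bool& sat) {
  int16_t wide = static_cast<int16_t>(vs2) + static_cast<int16_t>(rs1);
  if (wide > std::numeric_limits<int8_t>::max()) {
    sat = true;
    return std::numeric_limits<int8_t>::max();
  }
  if (wide < std::numeric_limits<int8_t>::min()) {
    sat = true;
    return std::numeric_limits<int8_t>::min();
  }
  return static_cast<int8_t>(wide);
}
// SatAdd8Sketch(120, 20, sat) yields 127 with sat = true; the decoder then
// records that via set_rvv_vxsat(sat).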
+void Simulator::DecodeRvvMVV() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVV);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VWXUNARY0: {
+ if (rvv_vs1_reg() == 0) {
+ switch (rvv_vsew()) {
+ case E8:
+ set_rd(Rvvelt<type_sew_t<8>::type>(rvv_vs2_reg(), 0));
+ break;
+ case E16:
+ set_rd(Rvvelt<type_sew_t<16>::type>(rvv_vs2_reg(), 0));
+ break;
+ case E32:
+ set_rd(Rvvelt<type_sew_t<32>::type>(rvv_vs2_reg(), 0));
+ break;
+ case E64:
+ set_rd(Rvvelt<type_sew_t<64>::type>(rvv_vs2_reg(), 0));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ set_rvv_vstart(0);
+        SNPrintF(trace_buf_, "0x%" PRIx64, get_register(rd_reg()));
+ } else {
+ v8::base::EmbeddedVector<char, 256> buffer;
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(&instr_));
+ PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+ reinterpret_cast<intptr_t>(&instr_), buffer.begin());
+ UNIMPLEMENTED_RISCV();
+ }
+ } break;
+ case RO_V_VREDMAXU:
+ RVV_VI_VV_ULOOP_REDUCTION(
+ { vd_0_res = (vd_0_res >= vs2) ? vd_0_res : vs2; })
+ break;
+ case RO_V_VREDMAX:
+ RVV_VI_VV_LOOP_REDUCTION(
+ { vd_0_res = (vd_0_res >= vs2) ? vd_0_res : vs2; })
+ break;
+ case RO_V_VREDMINU:
+ RVV_VI_VV_ULOOP_REDUCTION(
+ { vd_0_res = (vd_0_res <= vs2) ? vd_0_res : vs2; })
+ break;
+ case RO_V_VREDMIN:
+ RVV_VI_VV_LOOP_REDUCTION(
+ { vd_0_res = (vd_0_res <= vs2) ? vd_0_res : vs2; })
+ break;
+ default:
+ v8::base::EmbeddedVector<char, 256> buffer;
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(&instr_));
+ PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+ reinterpret_cast<intptr_t>(&instr_), buffer.begin());
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+}
+
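The vredmax/vredmin handlers above fold every active element of vs2 into a single result. Per the RVV spec the accumulator is seeded from vs1[0] and the result is written back to vd[0]; a minimal unmasked sketch of the unsigned-max case (masking and tail handling omitted, names illustrative):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// vredmaxu.vs: vd[0] = max(vs1[0], max over vs2[0..vl-1]).
uint32_t RedMaxUSketch(uint32_t vs1_0, const std::vector<uint32_t>& vs2,
                       size_t vl) {
  uint32_t acc = vs1_0;
  for (size_t i = 0; i < vl; ++i) acc = std::max(acc, vs2[i]);
  return acc;  // The simulator stores this back into element 0 of vd.
}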
+void Simulator::DecodeRvvMVX() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVX);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VRXUNARY0:
+ if (instr_.Vs2Value() == 0x0) {
+ if (rvv_vl() > 0 && rvv_vstart() < rvv_vl()) {
+ switch (rvv_vsew()) {
+ case E8:
+ Rvvelt<uint8_t>(rvv_vd_reg(), 0, true) =
+ (uint8_t)get_register(rs1_reg());
+ break;
+ case E16:
+ Rvvelt<uint16_t>(rvv_vd_reg(), 0, true) =
+ (uint16_t)get_register(rs1_reg());
+ break;
+ case E32:
+ Rvvelt<uint32_t>(rvv_vd_reg(), 0, true) =
+ (uint32_t)get_register(rs1_reg());
+ break;
+ case E64:
+ Rvvelt<uint64_t>(rvv_vd_reg(), 0, true) =
+ (uint64_t)get_register(rs1_reg());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ // set_rvv_vl(0);
+ }
+ set_rvv_vstart(0);
+ rvv_trace_vd();
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ default:
+ v8::base::EmbeddedVector<char, 256> buffer;
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(&instr_));
+ PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+ reinterpret_cast<intptr_t>(&instr_), buffer.begin());
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+}
+
+void Simulator::DecodeVType() {
+ switch (instr_.InstructionBits() & (kFunct3Mask | kBaseOpcodeMask)) {
+ case OP_IVV:
+ DecodeRvvIVV();
+ return;
+ case OP_FVV:
+ UNIMPLEMENTED_RISCV();
+ return;
+ case OP_MVV:
+ DecodeRvvMVV();
+ return;
+ case OP_IVI:
+ DecodeRvvIVI();
+ return;
+ case OP_IVX:
+ DecodeRvvIVX();
+ return;
+ case OP_FVF:
+ UNIMPLEMENTED_RISCV();
+ return;
+ case OP_MVX:
+ DecodeRvvMVX();
+ return;
+ }
+ switch (instr_.InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0x80000000)) {
+ case RO_V_VSETVLI: {
+ uint64_t avl;
+ set_rvv_vtype(rvv_zimm());
+ if (rs1_reg() != zero_reg) {
+ avl = rs1();
+ } else if (rd_reg() != zero_reg) {
+ avl = ~0;
+ } else {
+ avl = rvv_vl();
+ }
+ avl = avl <= rvv_vlmax() ? avl : rvv_vlmax();
+ set_rvv_vl(avl);
+ set_rd(rvv_vl());
+ rvv_trace_status();
+ break;
+ }
+ case RO_V_VSETVL: {
+ if (!(instr_.InstructionBits() & 0x40000000)) {
+ uint64_t avl;
+ set_rvv_vtype(rs2());
+ if (rs1_reg() != zero_reg) {
+ avl = rs1();
+ } else if (rd_reg() != zero_reg) {
+ avl = ~0;
+ } else {
+ avl = rvv_vl();
+ }
+ avl = avl <= rvv_vlmax()
+ ? avl
+ : avl < (rvv_vlmax() * 2) ? avl / 2 : rvv_vlmax();
+ set_rvv_vl(avl);
+ set_rd(rvv_vl());
+ rvv_trace_status();
+ } else {
+ DCHECK_EQ(instr_.InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0xC0000000),
+ RO_V_VSETIVLI);
+ uint64_t avl;
+ set_rvv_vtype(rvv_zimm());
+ avl = instr_.Rvvuimm();
+ avl = avl <= rvv_vlmax()
+ ? avl
+ : avl < (rvv_vlmax() * 2) ? avl / 2 : rvv_vlmax();
+ set_rvv_vl(avl);
+ set_rd(rvv_vl());
+ rvv_trace_status();
+ break;
+ }
+ break;
+ }
+ default:
+      FATAL("Error: Unsupported on FILE:%s:%d.", __FILE__, __LINE__);
+ }
+}
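For the vsetvli path above, a short worked example may help. With kRvvVLEN = 128 (the value implied by the static_assert on Vregister_ in the header diff later in this patch), e32/m1 gives VLMAX = 128 / 32 = 4, so a requested AVL of 10 is clamped to vl = 4 and that value is also copied into rd. A minimal sketch of the clamping rule for the simple cases (the ratio rule used for AVL between VLMAX and 2*VLMAX is omitted here):

#include <algorithm>
#include <cstdint>

// vl = min(AVL, VLMAX), as in the RO_V_VSETVLI case above.
uint64_t ComputeVlSketch(uint64_t avl, uint64_t vlen_bits, uint64_t sew_bits,
                         uint64_t lmul) {
  uint64_t vlmax = vlen_bits * lmul / sew_bits;
  return std::min(avl, vlmax);
}
// ComputeVlSketch(10, 128, 32, 1) == 4, matching "vsetvli rd, rs1, e32, m1"
// with rs1 = 10 on a VLEN=128 implementation.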
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
@@ -3473,6 +4936,9 @@ void Simulator::InstructionDecode(Instruction* instr) {
case Instruction::kCSType:
DecodeCSType();
break;
+ case Instruction::kVType:
+ DecodeVType();
+ break;
default:
if (1) {
std::cout << "Unrecognized instruction [@pc=0x" << std::hex
@@ -3483,7 +4949,7 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
if (::v8::internal::FLAG_trace_sim) {
- PrintF(" 0x%012" PRIxPTR " %-44s %s\n",
+ PrintF(" 0x%012" PRIxPTR " %-44s\t%s\n",
reinterpret_cast<intptr_t>(instr), buffer.begin(),
trace_buf_.begin());
}
@@ -3524,8 +4990,6 @@ void Simulator::CallInternal(Address entry) {
set_register(ra, end_sim_pc);
// Remember the values of callee-saved registers.
- // The code below assumes that r9 is not used as sb (static base) in
- // simulator code and therefore is regarded as a callee-saved register.
int64_t s0_val = get_register(s0);
int64_t s1_val = get_register(s1);
int64_t s2_val = get_register(s2);
@@ -3534,9 +4998,12 @@ void Simulator::CallInternal(Address entry) {
int64_t s5_val = get_register(s5);
int64_t s6_val = get_register(s6);
int64_t s7_val = get_register(s7);
+ int64_t s8_val = get_register(s8);
+ int64_t s9_val = get_register(s9);
+ int64_t s10_val = get_register(s10);
+ int64_t s11_val = get_register(s11);
int64_t gp_val = get_register(gp);
int64_t sp_val = get_register(sp);
- int64_t fp_val = get_register(fp);
// Set up the callee-saved registers with a known value. To be able to check
// that they are preserved properly across JS execution.
@@ -3549,8 +5016,11 @@ void Simulator::CallInternal(Address entry) {
set_register(s5, callee_saved_value);
set_register(s6, callee_saved_value);
set_register(s7, callee_saved_value);
+ set_register(s8, callee_saved_value);
+ set_register(s9, callee_saved_value);
+ set_register(s10, callee_saved_value);
+ set_register(s11, callee_saved_value);
set_register(gp, callee_saved_value);
- set_register(fp, callee_saved_value);
// Start the simulation.
Execute();
@@ -3564,8 +5034,11 @@ void Simulator::CallInternal(Address entry) {
CHECK_EQ(callee_saved_value, get_register(s5));
CHECK_EQ(callee_saved_value, get_register(s6));
CHECK_EQ(callee_saved_value, get_register(s7));
+ CHECK_EQ(callee_saved_value, get_register(s8));
+ CHECK_EQ(callee_saved_value, get_register(s9));
+ CHECK_EQ(callee_saved_value, get_register(s10));
+ CHECK_EQ(callee_saved_value, get_register(s11));
CHECK_EQ(callee_saved_value, get_register(gp));
- CHECK_EQ(callee_saved_value, get_register(fp));
// Restore callee-saved registers with the original value.
set_register(s0, s0_val);
@@ -3576,9 +5049,12 @@ void Simulator::CallInternal(Address entry) {
set_register(s5, s5_val);
set_register(s6, s6_val);
set_register(s7, s7_val);
+ set_register(s8, s8_val);
+ set_register(s9, s9_val);
+ set_register(s10, s10_val);
+ set_register(s11, s11_val);
set_register(gp, gp_val);
set_register(sp, sp_val);
- set_register(fp, fp_val);
}
intptr_t Simulator::CallImpl(Address entry, int argument_count,
@@ -3586,15 +5062,12 @@ intptr_t Simulator::CallImpl(Address entry, int argument_count,
constexpr int kRegisterPassedArguments = 8;
// Set up arguments.
- // First four arguments passed in registers in both ABI's.
+  // The RISC-V calling convention passes the first eight arguments in a0-a7.
int reg_arg_count = std::min(kRegisterPassedArguments, argument_count);
if (reg_arg_count > 0) set_register(a0, arguments[0]);
if (reg_arg_count > 1) set_register(a1, arguments[1]);
if (reg_arg_count > 2) set_register(a2, arguments[2]);
if (reg_arg_count > 3) set_register(a3, arguments[3]);
-
- // Up to eight arguments passed in registers in N64 ABI.
- // TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this.
if (reg_arg_count > 4) set_register(a4, arguments[4]);
if (reg_arg_count > 5) set_register(a5, arguments[5]);
if (reg_arg_count > 6) set_register(a6, arguments[6]);
@@ -3602,12 +5075,13 @@ intptr_t Simulator::CallImpl(Address entry, int argument_count,
if (::v8::internal::FLAG_trace_sim) {
std::cout << "CallImpl: reg_arg_count = " << reg_arg_count << std::hex
- << " entry-pc (JSEntry) = 0x" << entry << " a0 (Isolate) = 0x"
- << get_register(a0) << " a1 (orig_func/new_target) = 0x"
- << get_register(a1) << " a2 (func/target) = 0x"
- << get_register(a2) << " a3 (receiver) = 0x" << get_register(a3)
- << " a4 (argc) = 0x" << get_register(a4) << " a5 (argv) = 0x"
- << get_register(a5) << std::endl;
+ << " entry-pc (JSEntry) = 0x" << entry
+ << " a0 (Isolate-root) = 0x" << get_register(a0)
+ << " a1 (orig_func/new_target) = 0x" << get_register(a1)
+ << " a2 (func/target) = 0x" << get_register(a2)
+ << " a3 (receiver) = 0x" << get_register(a3) << " a4 (argc) = 0x"
+ << get_register(a4) << " a5 (argv) = 0x" << get_register(a5)
+ << std::endl;
}
// Remaining arguments passed on stack.
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.h b/deps/v8/src/execution/riscv64/simulator-riscv64.h
index 2fa40cea4e..90f0edec4c 100644
--- a/deps/v8/src/execution/riscv64/simulator-riscv64.h
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.h
@@ -299,6 +299,42 @@ class Simulator : public SimulatorBase {
kNumFPURegisters
};
+ enum VRegister {
+ v0,
+ v1,
+ v2,
+ v3,
+ v4,
+ v5,
+ v6,
+ v7,
+ v8,
+ v9,
+ v10,
+ v11,
+ v12,
+ v13,
+ v14,
+ v15,
+ v16,
+ v17,
+ v18,
+ v19,
+ v20,
+ v21,
+ v22,
+ v23,
+ v24,
+ v25,
+ v26,
+ v27,
+ v28,
+ v29,
+ v30,
+ v31,
+ kNumVRegisters
+ };
+
explicit Simulator(Isolate* isolate);
~Simulator();
@@ -312,7 +348,7 @@ class Simulator : public SimulatorBase {
void set_register(int reg, int64_t value);
void set_register_word(int reg, int32_t value);
void set_dw_register(int dreg, const int* dbl);
- int64_t get_register(int reg) const;
+ V8_EXPORT_PRIVATE int64_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
// Same for FPURegisters.
@@ -338,6 +374,59 @@ class Simulator : public SimulatorBase {
void set_fflags(uint32_t flags) { set_csr_bits(csr_fflags, flags); }
void clear_fflags(int32_t flags) { clear_csr_bits(csr_fflags, flags); }
+ // RVV CSR
+ __int128_t get_vregister(int vreg) const;
+ inline uint64_t rvv_vlen() const { return kRvvVLEN; }
+ inline uint64_t rvv_vtype() const { return vtype_; }
+ inline uint64_t rvv_vl() const { return vl_; }
+ inline uint64_t rvv_vstart() const { return vstart_; }
+ inline uint64_t rvv_vxsat() const { return vxsat_; }
+ inline uint64_t rvv_vxrm() const { return vxrm_; }
+ inline uint64_t rvv_vcsr() const { return vcsr_; }
+ inline uint64_t rvv_vlenb() const { return vlenb_; }
+ inline uint32_t rvv_zimm() const { return instr_.Rvvzimm(); }
+ inline uint32_t rvv_vlmul() const { return (rvv_vtype() & 0x7); }
+ inline uint32_t rvv_vsew() const { return ((rvv_vtype() >> 3) & 0x7); }
+
+ inline const char* rvv_sew_s() const {
+ uint32_t vsew = rvv_vsew();
+ switch (vsew) {
+#define CAST_VSEW(name) \
+ case name: \
+ return #name;
+ RVV_SEW(CAST_VSEW)
+ default:
+ return "unknown";
+#undef CAST_VSEW
+ }
+ }
+
+ inline const char* rvv_lmul_s() const {
+ uint32_t vlmul = rvv_vlmul();
+ switch (vlmul) {
+#define CAST_VLMUL(name) \
+ case name: \
+ return #name;
+ RVV_LMUL(CAST_VLMUL)
+ default:
+ return "unknown";
+#undef CAST_VLMUL
+ }
+ }
+
+  // Returns the lane size in bits: 8, 16, 32, or 64.
+ inline uint32_t rvv_sew() const {
+ DCHECK_EQ(rvv_vsew() & (~0x7), 0x0);
+ return (0x1 << rvv_vsew()) * 8;
+ }
+ inline uint64_t rvv_vlmax() const {
+ if ((rvv_vlmul() & 0b100) != 0) {
+ return (rvv_vlen() / rvv_sew()) >> (rvv_vlmul() & 0b11);
+ } else {
+ return ((rvv_vlen() << rvv_vlmul()) / rvv_sew());
+ }
+ }
+
inline uint32_t get_dynamic_rounding_mode();
inline bool test_fflags_bits(uint32_t mask);
@@ -354,7 +443,7 @@ class Simulator : public SimulatorBase {
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int64_t value);
- int64_t get_pc() const;
+ V8_EXPORT_PRIVATE int64_t get_pc() const;
Address get_sp() const { return static_cast<Address>(get_register(sp)); }
@@ -550,6 +639,234 @@ class Simulator : public SimulatorBase {
}
}
+ // RVV
+  // The following RVV code is based on:
+ // https://github.com/riscv/riscv-isa-sim
+ // Copyright (c) 2010-2017, The Regents of the University of California
+ // (Regents). All Rights Reserved.
+
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are met:
+ // 1. Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // 2. Redistributions in binary form must reproduce the above copyright
+ // notice, this list of conditions and the following disclaimer in the
+ // documentation and/or other materials provided with the distribution.
+ // 3. Neither the name of the Regents nor the
+ // names of its contributors may be used to endorse or promote products
+ // derived from this software without specific prior written permission.
+
+ // IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+ // SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
+ // ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
+ // REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ // REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED
+ // TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ // PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
+ // HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+ // MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ template <uint64_t N>
+ struct type_usew_t;
+ template <>
+ struct type_usew_t<8> {
+ using type = uint8_t;
+ };
+
+ template <>
+ struct type_usew_t<16> {
+ using type = uint16_t;
+ };
+
+ template <>
+ struct type_usew_t<32> {
+ using type = uint32_t;
+ };
+
+ template <>
+ struct type_usew_t<64> {
+ using type = uint64_t;
+ };
+
+ template <>
+ struct type_usew_t<128> {
+ using type = __uint128_t;
+ };
+ template <uint64_t N>
+ struct type_sew_t;
+
+ template <>
+ struct type_sew_t<8> {
+ using type = int8_t;
+ };
+
+ template <>
+ struct type_sew_t<16> {
+ using type = int16_t;
+ };
+
+ template <>
+ struct type_sew_t<32> {
+ using type = int32_t;
+ };
+
+ template <>
+ struct type_sew_t<64> {
+ using type = int64_t;
+ };
+
+ template <>
+ struct type_sew_t<128> {
+ using type = __int128_t;
+ };
+
+#define VV_PARAMS(x) \
+ type_sew_t<x>::type& vd = \
+ Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VV_UPARAMS(x) \
+ type_usew_t<x>::type& vd = \
+ Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_usew_t<x>::type vs1 = Rvvelt<type_usew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VX_PARAMS(x) \
+ type_sew_t<x>::type& vd = \
+ Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VX_UPARAMS(x) \
+ type_usew_t<x>::type& vd = \
+ Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_usew_t<x>::type rs1 = (type_usew_t<x>::type)(get_register(rs1_reg())); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VI_PARAMS(x) \
+ type_sew_t<x>::type& vd = \
+ Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)(instr_.RvvSimm5()); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VI_UPARAMS(x) \
+ type_usew_t<x>::type& vd = \
+ Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_usew_t<x>::type uimm5 = (type_usew_t<x>::type)(instr_.RvvUimm5()); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VXI_PARAMS(x) \
+ type_sew_t<x>::type& vd = \
+ Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i); \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)(instr_.RvvSimm5());
+
+#define VI_XI_SLIDEDOWN_PARAMS(x, off) \
+ auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ auto vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i + off);
+
+#define VI_XI_SLIDEUP_PARAMS(x, offset) \
+ auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ auto vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i - offset);
+
+ inline void rvv_trace_vd() {
+ if (::v8::internal::FLAG_trace_sim) {
+ __int128_t value = Vregister_[rvv_vd_reg()];
+ SNPrintF(trace_buf_, "0x%016" PRIx64 "%016" PRIx64 " (%" PRId64 ")",
+ *(reinterpret_cast<int64_t*>(&value) + 1),
+ *reinterpret_cast<int64_t*>(&value), icount_);
+ }
+ }
+
+ inline void rvv_trace_vs1() {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
+ v8::internal::VRegisters::Name(static_cast<int>(rvv_vs1_reg())),
+ (uint64_t)(get_vregister(static_cast<int>(rvv_vs1_reg())) >> 64),
+ (uint64_t)get_vregister(static_cast<int>(rvv_vs1_reg())));
+ }
+ }
+
+ inline void rvv_trace_vs2() {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
+ v8::internal::VRegisters::Name(static_cast<int>(rvv_vs2_reg())),
+ (uint64_t)(get_vregister(static_cast<int>(rvv_vs2_reg())) >> 64),
+ (uint64_t)get_vregister(static_cast<int>(rvv_vs2_reg())));
+ }
+ }
+ inline void rvv_trace_v0() {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
+ v8::internal::VRegisters::Name(v0),
+ (uint64_t)(get_vregister(v0) >> 64), (uint64_t)get_vregister(v0));
+ }
+ }
+
+ inline void rvv_trace_rs1() {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("\t%s:0x%016" PRIx64 "\n",
+ v8::internal::Registers::Name(static_cast<int>(rs1_reg())),
+ (uint64_t)(get_register(rs1_reg())));
+ }
+ }
+
+ inline void rvv_trace_status() {
+ if (::v8::internal::FLAG_trace_sim) {
+ int i = 0;
+ for (; i < trace_buf_.length(); i++) {
+ if (trace_buf_[i] == '\0') break;
+ }
+ SNPrintF(trace_buf_.SubVector(i, trace_buf_.length()),
+ " sew:%s lmul:%s vstart:%lu vl:%lu", rvv_sew_s(), rvv_lmul_s(),
+ rvv_vstart(), rvv_vl());
+ }
+ }
+
+ template <class T>
+ T& Rvvelt(reg_t vReg, uint64_t n, bool is_write = false) {
+ CHECK_NE(rvv_sew(), 0);
+ CHECK_GT((rvv_vlen() >> 3) / sizeof(T), 0);
+ reg_t elts_per_reg = (rvv_vlen() >> 3) / (sizeof(T));
+ vReg += n / elts_per_reg;
+ n = n % elts_per_reg;
+ T* regStart = reinterpret_cast<T*>(reinterpret_cast<char*>(Vregister_) +
+ vReg * (rvv_vlen() >> 3));
+ return regStart[n];
+ }
+
+ inline int32_t rvv_vs1_reg() { return instr_.Vs1Value(); }
+ inline reg_t rvv_vs1() { UNIMPLEMENTED(); }
+ inline int32_t rvv_vs2_reg() { return instr_.Vs2Value(); }
+ inline reg_t rvv_vs2() { UNIMPLEMENTED(); }
+ inline int32_t rvv_vd_reg() { return instr_.VdValue(); }
+ inline int32_t rvv_vs3_reg() { return instr_.VdValue(); }
+ inline reg_t rvv_vd() { UNIMPLEMENTED(); }
+ inline int32_t rvv_nf() {
+ return (instr_.InstructionBits() & kRvvNfMask) >> kRvvNfShift;
+ }
+
+ inline void set_vrd() { UNIMPLEMENTED(); }
+
+ inline void set_rvv_vtype(uint64_t value, bool trace = true) {
+ vtype_ = value;
+ }
+ inline void set_rvv_vl(uint64_t value, bool trace = true) { vl_ = value; }
+ inline void set_rvv_vstart(uint64_t value, bool trace = true) {
+ vstart_ = value;
+ }
+ inline void set_rvv_vxsat(uint64_t value, bool trace = true) {
+ vxsat_ = value;
+ }
+ inline void set_rvv_vxrm(uint64_t value, bool trace = true) { vxrm_ = value; }
+ inline void set_rvv_vcsr(uint64_t value, bool trace = true) { vcsr_ = value; }
+ inline void set_rvv_vlenb(uint64_t value, bool trace = true) {
+ vlenb_ = value;
+ }
+
template <typename T, typename Func>
inline T CanonicalizeFPUOp3(Func fn) {
DCHECK(std::is_floating_point<T>::value);
@@ -634,6 +951,14 @@ class Simulator : public SimulatorBase {
void DecodeCSType();
void DecodeCJType();
void DecodeCBType();
+ void DecodeVType();
+ void DecodeRvvIVV();
+ void DecodeRvvIVI();
+ void DecodeRvvIVX();
+ void DecodeRvvMVV();
+ void DecodeRvvMVX();
+ bool DecodeRvvVL();
+ bool DecodeRvvVS();
// Used for breakpoints and traps.
void SoftwareInterrupt();
@@ -700,6 +1025,10 @@ class Simulator : public SimulatorBase {
// Floating-point control and status register.
uint32_t FCSR_;
+ // RVV registers
+ __int128_t Vregister_[kNumVRegisters];
+  static_assert(sizeof(__int128_t) == kRvvVLEN / 8, "mismatched VLEN");
+ uint64_t vstart_, vxsat_, vxrm_, vcsr_, vtype_, vl_, vlenb_;
// Simulator support.
// Allocate 1MB for stack.
size_t stack_size_;
@@ -707,7 +1036,7 @@ class Simulator : public SimulatorBase {
bool pc_modified_;
int64_t icount_;
int break_count_;
- base::EmbeddedVector<char, 128> trace_buf_;
+ base::EmbeddedVector<char, 256> trace_buf_;
// Debugger input.
char* last_debugger_input_;
@@ -820,7 +1149,6 @@ class Simulator : public SimulatorBase {
LocalMonitor local_monitor_;
GlobalMonitor::LinkedAddress global_monitor_thread_;
};
-
} // namespace internal
} // namespace v8
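Rvvelt<T> in the header above flattens a (register, element) pair into the 128-bit Vregister_ array; indexing past the end of one register continues into the next, which is how LMUL > 1 register groups are reached. A minimal sketch of that address arithmetic, assuming VLEN = 128 and 32-bit elements (a plain byte array stands in for the register file):

#include <cstdint>
#include <cstring>

// Element n of the register group starting at vreg, 16 bytes per register.
uint32_t RvveltSketch(const uint8_t (&vregs)[32][16], int vreg, uint64_t n) {
  const uint64_t elts_per_reg = 16 / sizeof(uint32_t);  // 4 lanes of e32
  vreg += static_cast<int>(n / elts_per_reg);           // spill into next reg
  n %= elts_per_reg;
  uint32_t value;
  std::memcpy(&value, &vregs[vreg][n * sizeof(uint32_t)], sizeof(value));
  return value;
}
// RvveltSketch(vregs, 8, 6) reads lane 2 of v9, just as Rvvelt<int32_t>(8, 6)
// walks one register past v8.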
diff --git a/deps/v8/src/execution/runtime-profiler.cc b/deps/v8/src/execution/runtime-profiler.cc
index 5ce45f43c2..4d710c5aaa 100644
--- a/deps/v8/src/execution/runtime-profiler.cc
+++ b/deps/v8/src/execution/runtime-profiler.cc
@@ -20,24 +20,11 @@
namespace v8 {
namespace internal {
-// Number of times a function has to be seen on the stack before it is
-// optimized.
-static const int kProfilerTicksBeforeOptimization = 3;
-
-// The number of ticks required for optimizing a function increases with
-// the size of the bytecode. This is in addition to the
-// kProfilerTicksBeforeOptimization required for any function.
-static const int kBytecodeSizeAllowancePerTick = 1100;
-
// Maximum size in bytes of generated code for a function to allow OSR.
static const int kOSRBytecodeSizeAllowanceBase = 119;
static const int kOSRBytecodeSizeAllowancePerTick = 44;
-// Maximum size in bytes of generated code for a function to be optimized
-// the very first time it is seen on the stack.
-static const int kMaxBytecodeSizeForEarlyOpt = 81;
-
#define OPTIMIZATION_REASON_LIST(V) \
V(DoNotOptimize, "do not optimize") \
V(HotAndStable, "hot and stable") \
@@ -191,7 +178,7 @@ namespace {
bool ShouldOptimizeAsSmallFunction(int bytecode_size, int ticks,
bool any_ic_changed,
bool active_tier_is_turboprop) {
- if (any_ic_changed || bytecode_size >= kMaxBytecodeSizeForEarlyOpt)
+ if (any_ic_changed || bytecode_size >= FLAG_max_bytecode_size_for_early_opt)
return false;
return true;
}
@@ -209,8 +196,8 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction function,
int ticks = function.feedback_vector().profiler_ticks();
bool active_tier_is_turboprop = function.ActiveTierIsMidtierTurboprop();
int ticks_for_optimization =
- kProfilerTicksBeforeOptimization +
- (bytecode.length() / kBytecodeSizeAllowancePerTick);
+ FLAG_ticks_before_optimization +
+ (bytecode.length() / FLAG_bytecode_size_allowance_per_tick);
if (ticks >= ticks_for_optimization) {
return OptimizationReason::kHotAndStable;
} else if (ShouldOptimizeAsSmallFunction(bytecode.length(), ticks,
@@ -227,7 +214,7 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction function,
PrintF("ICs changed]\n");
} else {
PrintF(" too large for small function optimization: %d/%d]\n",
- bytecode.length(), kMaxBytecodeSizeForEarlyOpt);
+ bytecode.length(), FLAG_max_bytecode_size_for_early_opt);
}
}
return OptimizationReason::kDoNotOptimize;
@@ -250,7 +237,7 @@ void RuntimeProfiler::MarkCandidatesForOptimization(JavaScriptFrame* frame) {
MarkCandidatesForOptimizationScope scope(this);
JSFunction function = frame->function();
- CodeKind code_kind = function.GetActiveTier();
+ CodeKind code_kind = function.GetActiveTier().value();
DCHECK(function.shared().is_compiled());
DCHECK(function.shared().IsInterpreted());
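The runtime-profiler change above replaces hard-coded tiering constants with flags. Assuming the flag defaults keep the old values deleted above (3 ticks plus one tick per 1100 bytecode bytes, and an 81-byte early-opt limit; an assumption, since the flag definitions are not part of this hunk), the decision reduces to simple arithmetic:

// Ticks required before a function is considered hot and stable.
// The defaults below mirror the deleted constants and are assumptions.
int TicksForOptimizationSketch(int bytecode_length,
                               int ticks_before_optimization = 3,
                               int bytecode_size_allowance_per_tick = 1100) {
  return ticks_before_optimization +
         bytecode_length / bytecode_size_allowance_per_tick;
}
// A 4400-byte function needs 3 + 4 = 7 profiler ticks; a function under the
// (assumed) 81-byte early-opt limit can be optimized on its first tick as
// long as no ICs changed.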
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 88a8cb4121..31a03eed4e 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -109,7 +109,6 @@ bool S390Debugger::GetValue(const char* desc, intptr_t* value) {
1;
}
}
- return false;
}
bool S390Debugger::GetFPDoubleValue(const char* desc, double* value) {
@@ -758,8 +757,14 @@ void Simulator::EvalTableInit() {
V(vlrep, VLREP, 0xE705) /* type = VRX VECTOR LOAD AND REPLICATE */ \
V(vrepi, VREPI, 0xE745) /* type = VRI_A VECTOR REPLICATE IMMEDIATE */ \
V(vlr, VLR, 0xE756) /* type = VRR_A VECTOR LOAD */ \
+ V(vsteb, VSTEB, 0xE708) /* type = VRX VECTOR STORE ELEMENT (8) */ \
+ V(vsteh, VSTEH, 0xE709) /* type = VRX VECTOR STORE ELEMENT (16) */ \
V(vstef, VSTEF, 0xE70B) /* type = VRX VECTOR STORE ELEMENT (32) */ \
+ V(vsteg, VSTEG, 0xE70A) /* type = VRX VECTOR STORE ELEMENT (64) */ \
+  V(vleb, VLEB, 0xE700)   /* type = VRX VECTOR LOAD ELEMENT (8) */   \
+ V(vleh, VLEH, 0xE701) /* type = VRX VECTOR LOAD ELEMENT (16) */ \
V(vlef, VLEF, 0xE703) /* type = VRX VECTOR LOAD ELEMENT (32) */ \
+ V(vleg, VLEG, 0xE702) /* type = VRX VECTOR LOAD ELEMENT (64) */ \
V(vavgl, VAVGL, 0xE7F0) /* type = VRR_C VECTOR AVERAGE LOGICAL */ \
V(va, VA, 0xE7F3) /* type = VRR_C VECTOR ADD */ \
V(vs, VS, 0xE7F7) /* type = VRR_C VECTOR SUBTRACT */ \
@@ -1775,50 +1780,50 @@ void Simulator::TrashCallerSaveRegisters() {
#endif
}
-uint32_t Simulator::ReadWU(intptr_t addr, Instruction* instr) {
+uint32_t Simulator::ReadWU(intptr_t addr) {
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
return *ptr;
}
-int64_t Simulator::ReadW64(intptr_t addr, Instruction* instr) {
+int64_t Simulator::ReadW64(intptr_t addr) {
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
return *ptr;
}
-int32_t Simulator::ReadW(intptr_t addr, Instruction* instr) {
+int32_t Simulator::ReadW(intptr_t addr) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return *ptr;
}
-void Simulator::WriteW(intptr_t addr, uint32_t value, Instruction* instr) {
+void Simulator::WriteW(intptr_t addr, uint32_t value) {
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
*ptr = value;
return;
}
-void Simulator::WriteW(intptr_t addr, int32_t value, Instruction* instr) {
+void Simulator::WriteW(intptr_t addr, int32_t value) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr = value;
return;
}
-uint16_t Simulator::ReadHU(intptr_t addr, Instruction* instr) {
+uint16_t Simulator::ReadHU(intptr_t addr) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
-int16_t Simulator::ReadH(intptr_t addr, Instruction* instr) {
+int16_t Simulator::ReadH(intptr_t addr) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}
-void Simulator::WriteH(intptr_t addr, uint16_t value, Instruction* instr) {
+void Simulator::WriteH(intptr_t addr, uint16_t value) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
return;
}
-void Simulator::WriteH(intptr_t addr, int16_t value, Instruction* instr) {
+void Simulator::WriteH(intptr_t addr, int16_t value) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
return;
@@ -2036,7 +2041,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
if (!stack_aligned) {
PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
@@ -2076,7 +2080,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
switch (redirection->type()) {
@@ -2090,7 +2093,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -3187,12 +3189,57 @@ EVALUATE(VLR) {
return length;
}
+EVALUATE(VSTEB) {
+ DCHECK_OPCODE(VSTEB);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int8_t value = get_simd_register_by_lane<int8_t>(r1, m3);
+ WriteB(addr, value);
+ return length;
+}
+
+EVALUATE(VSTEH) {
+ DCHECK_OPCODE(VSTEH);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int16_t value = get_simd_register_by_lane<int16_t>(r1, m3);
+ WriteH(addr, value);
+ return length;
+}
+
EVALUATE(VSTEF) {
DCHECK_OPCODE(VSTEF);
DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
int32_t value = get_simd_register_by_lane<int32_t>(r1, m3);
- WriteW(addr, value, instr);
+ WriteW(addr, value);
+ return length;
+}
+
+EVALUATE(VSTEG) {
+ DCHECK_OPCODE(VSTEG);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int64_t value = get_simd_register_by_lane<int64_t>(r1, m3);
+ WriteDW(addr, value);
+ return length;
+}
+
+EVALUATE(VLEB) {
+ DCHECK_OPCODE(VLEB);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int8_t value = ReadB(addr);
+ set_simd_register_by_lane<int8_t>(r1, m3, value);
+ return length;
+}
+
+EVALUATE(VLEH) {
+ DCHECK_OPCODE(VLEH);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int16_t value = ReadH(addr);
+ set_simd_register_by_lane<int16_t>(r1, m3, value);
return length;
}
@@ -3200,11 +3247,20 @@ EVALUATE(VLEF) {
DCHECK_OPCODE(VLEF);
DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
- int32_t value = ReadW(addr, instr);
+ int32_t value = ReadW(addr);
set_simd_register_by_lane<int32_t>(r1, m3, value);
return length;
}
+EVALUATE(VLEG) {
+ DCHECK_OPCODE(VLEG);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ uint64_t value = ReadDW(addr);
+ set_simd_register_by_lane<uint64_t>(r1, m3, value);
+ return length;
+}
+
// TODO(john): unify most fp binary operations
template <class T, class Operation>
inline static void VectorBinaryOp(Simulator* sim, int dst, int src1, int src2,
@@ -4368,20 +4424,35 @@ EVALUATE(VFMAX) {
#undef CASE
template <class S, class D, class Operation>
-void VectorFPCompare(Simulator* sim, int dst, int src1, int src2,
+void VectorFPCompare(Simulator* sim, int dst, int src1, int src2, int m6,
Operation op) {
static_assert(sizeof(S) == sizeof(D),
"Expect input type size == output type size");
+ bool some_zero = false;
+ bool all_zero = true;
FOR_EACH_LANE(i, D) {
S src1_val = sim->get_simd_register_by_lane<S>(src1, i);
S src2_val = sim->get_simd_register_by_lane<S>(src2, i);
D value = op(src1_val, src2_val);
sim->set_simd_register_by_lane<D>(dst, i, value);
+ if (value) {
+ all_zero = false;
+ } else {
+ some_zero = true;
+ }
+ }
+  // TODO(miladfarca): implement other conditions.
+ if (m6) {
+ if (all_zero) {
+ sim->condition_reg_ = CC_OF;
+ } else if (some_zero) {
+ sim->condition_reg_ = 0x04;
+ }
}
}
-#define VECTOR_FP_COMPARE_FOR_TYPE(S, D, op) \
- VectorFPCompare<S, D>(this, r1, r2, r3, \
+#define VECTOR_FP_COMPARE_FOR_TYPE(S, D, op) \
+ VectorFPCompare<S, D>(this, r1, r2, r3, m6, \
[](S a, S b) { return (a op b) ? -1 : 0; });
#define VECTOR_FP_COMPARE(op) \
@@ -4415,7 +4486,6 @@ void VectorFPCompare(Simulator* sim, int dst, int src1, int src2,
EVALUATE(VFCE) {
DCHECK_OPCODE(VFCE);
DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
- USE(m6);
VECTOR_FP_COMPARE(==)
return length;
}
@@ -4578,7 +4648,7 @@ EVALUATE(L) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
set_low_register(r1, mem_val);
return length;
}
@@ -4727,7 +4797,7 @@ EVALUATE(LGF) {
DCHECK_OPCODE(LGF);
DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
- int64_t mem_val = static_cast<int64_t>(ReadW(addr, instr));
+ int64_t mem_val = static_cast<int64_t>(ReadW(addr));
set_register(r1, mem_val);
return length;
}
@@ -4739,7 +4809,7 @@ EVALUATE(ST) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- WriteW(addr, r1_val, instr);
+ WriteW(addr, r1_val);
return length;
}
@@ -4757,7 +4827,7 @@ EVALUATE(STY) {
DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
uint32_t value = get_low_register<uint32_t>(r1);
- WriteW(addr, value, instr);
+ WriteW(addr, value);
return length;
}
@@ -4765,7 +4835,7 @@ EVALUATE(LY) {
DCHECK_OPCODE(LY);
DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
- uint32_t mem_val = ReadWU(addr, instr);
+ uint32_t mem_val = ReadWU(addr);
set_low_register(r1, mem_val);
return length;
}
@@ -5166,7 +5236,7 @@ EVALUATE(STH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t mem_addr = b2_val + x2_val + d2_val;
- WriteH(mem_addr, r1_val, instr);
+ WriteH(mem_addr, r1_val);
return length;
}
@@ -5248,7 +5318,7 @@ EVALUATE(LH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = x2_val + b2_val + d2_val;
- int32_t result = static_cast<int32_t>(ReadH(mem_addr, instr));
+ int32_t result = static_cast<int32_t>(ReadH(mem_addr));
set_low_register(r1, result);
return length;
}
@@ -5266,7 +5336,7 @@ EVALUATE(AH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(addr));
int32_t alu_out = 0;
bool isOF = false;
isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
@@ -5285,7 +5355,7 @@ EVALUATE(SH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(addr));
int32_t alu_out = 0;
bool isOF = false;
isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
@@ -5303,7 +5373,7 @@ EVALUATE(MH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(addr));
int32_t alu_out = 0;
alu_out = r1_val * mem_val;
set_low_register(r1, alu_out);
@@ -5341,7 +5411,7 @@ EVALUATE(N) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
alu_out = r1_val & mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
@@ -5356,7 +5426,7 @@ EVALUATE(CL) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
SetS390ConditionCode<uint32_t>(r1_val, mem_val);
return length;
}
@@ -5368,7 +5438,7 @@ EVALUATE(O) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
alu_out = r1_val | mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
@@ -5383,7 +5453,7 @@ EVALUATE(X) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
alu_out = r1_val ^ mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
@@ -5398,7 +5468,7 @@ EVALUATE(C) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
SetS390ConditionCode<int32_t>(r1_val, mem_val);
return length;
}
@@ -5410,7 +5480,7 @@ EVALUATE(A) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
bool isOF = false;
isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
@@ -5428,7 +5498,7 @@ EVALUATE(S) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
bool isOF = false;
isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
@@ -5446,7 +5516,7 @@ EVALUATE(M) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
DCHECK_EQ(r1 % 2, 0);
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
int32_t r1_val = get_low_register<int32_t>(r1 + 1);
int64_t product =
static_cast<int64_t>(r1_val) * static_cast<int64_t>(mem_val);
@@ -5511,7 +5581,7 @@ EVALUATE(STE) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
int32_t frs_val = get_fpr<int32_t>(r1);
- WriteW(addr, frs_val, instr);
+ WriteW(addr, frs_val);
return length;
}
@@ -5520,7 +5590,7 @@ EVALUATE(MS) {
DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t r1_val = get_low_register<int32_t>(r1);
set_low_register(r1, r1_val * mem_val);
return length;
@@ -5733,7 +5803,7 @@ EVALUATE(STM) {
// Store each register in ascending order.
for (int i = 0; i <= r3 - r1; i++) {
int32_t value = get_low_register<int32_t>((r1 + i) % 16);
- WriteW(rb_val + offset + 4 * i, value, instr);
+ WriteW(rb_val + offset + 4 * i, value);
}
return length;
}
@@ -5793,7 +5863,7 @@ EVALUATE(LM) {
// Store each register in ascending order.
for (int i = 0; i <= r3 - r1; i++) {
- int32_t value = ReadW(rb_val + offset + 4 * i, instr);
+ int32_t value = ReadW(rb_val + offset + 4 * i);
set_low_register((r1 + i) % 16, value);
}
return length;
@@ -9254,7 +9324,7 @@ EVALUATE(LT) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
- int32_t value = ReadW(addr, instr);
+ int32_t value = ReadW(addr);
set_low_register(r1, value);
SetS390ConditionCode<int32_t>(value, 0);
return length;
@@ -9267,7 +9337,7 @@ EVALUATE(LGH) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
- int64_t mem_val = static_cast<int64_t>(ReadH(addr, instr));
+ int64_t mem_val = static_cast<int64_t>(ReadH(addr));
set_register(r1, mem_val);
return length;
}
@@ -9279,7 +9349,7 @@ EVALUATE(LLGF) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
- uint64_t mem_val = static_cast<uint64_t>(ReadWU(addr, instr));
+ uint64_t mem_val = static_cast<uint64_t>(ReadWU(addr));
set_register(r1, mem_val);
return length;
}
@@ -9298,7 +9368,7 @@ EVALUATE(AGF) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
uint64_t alu_out = r1_val;
- uint32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ uint32_t mem_val = ReadW(b2_val + d2_val + x2_val);
alu_out += mem_val;
SetS390ConditionCode<int64_t>(alu_out, 0);
set_register(r1, alu_out);
@@ -9313,7 +9383,7 @@ EVALUATE(SGF) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
uint64_t alu_out = r1_val;
- uint32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ uint32_t mem_val = ReadW(b2_val + d2_val + x2_val);
alu_out -= mem_val;
SetS390ConditionCode<int64_t>(alu_out, 0);
set_register(r1, alu_out);
@@ -9338,8 +9408,7 @@ EVALUATE(MSGF) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int64_t mem_val =
- static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val, instr));
+ int64_t mem_val = static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val));
int64_t r1_val = get_register(r1);
int64_t product = r1_val * mem_val;
set_register(r1, product);
@@ -9353,8 +9422,7 @@ EVALUATE(DSGF) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int64_t mem_val =
- static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val, instr));
+ int64_t mem_val = static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val));
int64_t r1_val = get_register(r1 + 1);
int64_t quotient = r1_val / mem_val;
int64_t remainder = r1_val % mem_val;
@@ -9369,7 +9437,7 @@ EVALUATE(LRVG) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
- int64_t mem_val = ReadW64(mem_addr, instr);
+ int64_t mem_val = ReadW64(mem_addr);
set_register(r1, ByteReverse(mem_val));
return length;
}
@@ -9380,7 +9448,7 @@ EVALUATE(LRV) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
- int32_t mem_val = ReadW(mem_addr, instr);
+ int32_t mem_val = ReadW(mem_addr);
set_low_register(r1, ByteReverse(mem_val));
return length;
}
@@ -9392,7 +9460,7 @@ EVALUATE(LRVH) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
- int16_t mem_val = ReadH(mem_addr, instr);
+ int16_t mem_val = ReadH(mem_addr);
int32_t result = ByteReverse(mem_val) & 0x0000FFFF;
result |= r1_val & 0xFFFF0000;
set_low_register(r1, result);
@@ -9478,7 +9546,7 @@ EVALUATE(STRV) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
- WriteW(mem_addr, ByteReverse(r1_val), instr);
+ WriteW(mem_addr, ByteReverse(r1_val));
return length;
}
@@ -9501,7 +9569,7 @@ EVALUATE(STRVH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
int16_t result = static_cast<int16_t>(r1_val >> 16);
- WriteH(mem_addr, ByteReverse(result), instr);
+ WriteH(mem_addr, ByteReverse(result));
return length;
}
@@ -9517,7 +9585,7 @@ EVALUATE(MSY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ int32_t mem_val = ReadW(b2_val + d2_val + x2_val);
int32_t r1_val = get_low_register<int32_t>(r1);
set_low_register(r1, mem_val * r1_val);
return length;
@@ -9529,7 +9597,7 @@ EVALUATE(MSC) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ int32_t mem_val = ReadW(b2_val + d2_val + x2_val);
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t result64 =
static_cast<int64_t>(r1_val) * static_cast<int64_t>(mem_val);
@@ -9548,7 +9616,7 @@ EVALUATE(NY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
alu_out &= mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
set_low_register(r1, alu_out);
@@ -9561,7 +9629,7 @@ EVALUATE(CLY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
uint32_t alu_out = get_low_register<uint32_t>(r1);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
SetS390ConditionCode<uint32_t>(alu_out, mem_val);
return length;
}
@@ -9572,7 +9640,7 @@ EVALUATE(OY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
alu_out |= mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
set_low_register(r1, alu_out);
@@ -9585,7 +9653,7 @@ EVALUATE(XY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
alu_out ^= mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
set_low_register(r1, alu_out);
@@ -9598,7 +9666,7 @@ EVALUATE(CY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
SetS390ConditionCode<int32_t>(alu_out, mem_val);
return length;
}
@@ -9609,7 +9677,7 @@ EVALUATE(AY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
bool isOF = false;
isOF = CheckOverflowForIntAdd(alu_out, mem_val, int32_t);
alu_out += mem_val;
@@ -9625,7 +9693,7 @@ EVALUATE(SY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
bool isOF = false;
isOF = CheckOverflowForIntSub(alu_out, mem_val, int32_t);
alu_out -= mem_val;
@@ -9641,7 +9709,7 @@ EVALUATE(MFY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
DCHECK_EQ(r1 % 2, 0);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
int32_t r1_val = get_low_register<int32_t>(r1 + 1);
int64_t product =
static_cast<int64_t>(r1_val) * static_cast<int64_t>(mem_val);
@@ -9659,7 +9727,7 @@ EVALUATE(ALY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
uint32_t alu_out = get_low_register<uint32_t>(r1);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
alu_out += mem_val;
set_low_register(r1, alu_out);
SetS390ConditionCode<uint32_t>(alu_out, 0);
@@ -9672,7 +9740,7 @@ EVALUATE(SLY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
uint32_t alu_out = get_low_register<uint32_t>(r1);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
alu_out -= mem_val;
set_low_register(r1, alu_out);
SetS390ConditionCode<uint32_t>(alu_out, 0);
@@ -9687,7 +9755,7 @@ EVALUATE(STHY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
uint16_t value = get_low_register<uint32_t>(r1);
- WriteH(addr, value, instr);
+ WriteH(addr, value);
return length;
}
@@ -9759,7 +9827,7 @@ EVALUATE(LHY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
- int32_t result = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t result = static_cast<int32_t>(ReadH(addr));
set_low_register(r1, result);
return length;
}
@@ -9777,8 +9845,7 @@ EVALUATE(AHY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int32_t mem_val =
- static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val));
int32_t alu_out = 0;
bool isOF = false;
alu_out = r1_val + mem_val;
@@ -9796,8 +9863,7 @@ EVALUATE(SHY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int32_t mem_val =
- static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val));
int32_t alu_out = 0;
bool isOF = false;
alu_out = r1_val - mem_val;
@@ -9919,7 +9985,7 @@ EVALUATE(LLGH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val, instr);
+ uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val);
set_register(r1, mem_val);
return length;
}
@@ -9931,7 +9997,7 @@ EVALUATE(LLH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val, instr);
+ uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val);
set_low_register(r1, mem_val);
return length;
}
@@ -9942,7 +10008,7 @@ EVALUATE(ML) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
DCHECK_EQ(r1 % 2, 0);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
uint64_t product =
static_cast<uint64_t>(r1_val) * static_cast<uint64_t>(mem_val);
@@ -9960,7 +10026,7 @@ EVALUATE(DL) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
DCHECK_EQ(r1 % 2, 0);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
uint64_t quotient =
static_cast<uint64_t>(r1_val) / static_cast<uint64_t>(mem_val);
@@ -10089,7 +10155,7 @@ EVALUATE(MVHI) {
DECODE_SIL_INSTRUCTION(b1, d1, i2);
int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
intptr_t src_addr = b1_val + d1;
- WriteW(src_addr, i2, instr);
+ WriteW(src_addr, i2);
return length;
}
@@ -10461,12 +10527,12 @@ EVALUATE(ASI) {
int d1_val = d1;
intptr_t addr = b1_val + d1_val;
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
bool isOF = CheckOverflowForIntAdd(mem_val, i2, int32_t);
int32_t alu_out = mem_val + i2;
SetS390ConditionCode<int32_t>(alu_out, 0);
SetS390OverflowCode(isOF);
- WriteW(addr, alu_out, instr);
+ WriteW(addr, alu_out);
return length;
}
@@ -10545,7 +10611,7 @@ EVALUATE(STMY) {
// Store each register in ascending order.
for (int i = 0; i <= r3 - r1; i++) {
int32_t value = get_low_register<int32_t>((r1 + i) % 16);
- WriteW(b2_val + offset + 4 * i, value, instr);
+ WriteW(b2_val + offset + 4 * i, value);
}
return length;
}
@@ -10571,7 +10637,7 @@ EVALUATE(LMY) {
// Store each register in ascending order.
for (int i = 0; i <= r3 - r1; i++) {
- int32_t value = ReadW(b2_val + offset + 4 * i, instr);
+ int32_t value = ReadW(b2_val + offset + 4 * i);
set_low_register((r1 + i) % 16, value);
}
return length;
@@ -11232,7 +11298,7 @@ EVALUATE(STEY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
int32_t frs_val = get_fpr<int32_t>(r1);
- WriteW(addr, frs_val, instr);
+ WriteW(addr, frs_val);
return length;
}
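The new VLEB/VLEH/VLEG and VSTEB/VSTEH/VSTEG handlers above move a single lane (selected by m3) between a 128-bit vector register and memory. A minimal sketch of the 16-bit pair, with plain byte arrays standing in for the simulated register file and guest memory (lane layout details such as byte order are glossed over here):

#include <cstdint>
#include <cstring>

// Store lane m3 of a 16-byte register to memory (VSTEH-style).
void StoreElement16Sketch(const uint8_t (&vreg)[16], int m3, uint8_t* mem) {
  std::memcpy(mem, &vreg[m3 * sizeof(int16_t)], sizeof(int16_t));
}

// Load a 16-bit value from memory into lane m3 (VLEH-style).
void LoadElement16Sketch(uint8_t (&vreg)[16], int m3, const uint8_t* mem) {
  std::memcpy(&vreg[m3 * sizeof(int16_t)], mem, sizeof(int16_t));
}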
diff --git a/deps/v8/src/execution/s390/simulator-s390.h b/deps/v8/src/execution/s390/simulator-s390.h
index 4c1b0a4924..cbe628691c 100644
--- a/deps/v8/src/execution/s390/simulator-s390.h
+++ b/deps/v8/src/execution/s390/simulator-s390.h
@@ -280,17 +280,17 @@ class Simulator : public SimulatorBase {
inline void WriteB(intptr_t addr, uint8_t value);
inline void WriteB(intptr_t addr, int8_t value);
- inline uint16_t ReadHU(intptr_t addr, Instruction* instr);
- inline int16_t ReadH(intptr_t addr, Instruction* instr);
+ inline uint16_t ReadHU(intptr_t addr);
+ inline int16_t ReadH(intptr_t addr);
// Note: Overloaded on the sign of the value.
- inline void WriteH(intptr_t addr, uint16_t value, Instruction* instr);
- inline void WriteH(intptr_t addr, int16_t value, Instruction* instr);
-
- inline uint32_t ReadWU(intptr_t addr, Instruction* instr);
- inline int32_t ReadW(intptr_t addr, Instruction* instr);
- inline int64_t ReadW64(intptr_t addr, Instruction* instr);
- inline void WriteW(intptr_t addr, uint32_t value, Instruction* instr);
- inline void WriteW(intptr_t addr, int32_t value, Instruction* instr);
+ inline void WriteH(intptr_t addr, uint16_t value);
+ inline void WriteH(intptr_t addr, int16_t value);
+
+ inline uint32_t ReadWU(intptr_t addr);
+ inline int32_t ReadW(intptr_t addr);
+ inline int64_t ReadW64(intptr_t addr);
+ inline void WriteW(intptr_t addr, uint32_t value);
+ inline void WriteW(intptr_t addr, int32_t value);
inline int64_t ReadDW(intptr_t addr);
inline double ReadDouble(intptr_t addr);
diff --git a/deps/v8/src/execution/simulator-base.h b/deps/v8/src/execution/simulator-base.h
index 9edc60a3f3..90e9441609 100644
--- a/deps/v8/src/execution/simulator-base.h
+++ b/deps/v8/src/execution/simulator-base.h
@@ -88,9 +88,9 @@ class SimulatorBase {
static typename std::enable_if<std::is_integral<T>::value, intptr_t>::type
ConvertArg(T arg) {
static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
-#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
- // The MIPS64 and RISCV64 calling convention is to sign extend all values,
- // even unsigned ones.
+#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
+ // The MIPS64, LOONG64 and RISCV64 calling convention is to sign extend all
+ // values, even unsigned ones.
using signed_t = typename std::make_signed<T>::type;
return static_cast<intptr_t>(static_cast<signed_t>(arg));
#else
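The simulator-base.h hunk above extends the sign-extending argument conversion to LOONG64. A standalone sketch of that conversion, shown here outside the SimulatorBase class purely for illustration:

#include <cstdint>
#include <type_traits>

// MIPS64/LOONG64/RISCV64 calling convention: sign-extend every integral
// argument to the full register width, even unsigned ones.
template <typename T>
intptr_t ConvertArgSketch(T arg) {
  static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
  using signed_t = typename std::make_signed<T>::type;
  return static_cast<intptr_t>(static_cast<signed_t>(arg));
}
// ConvertArgSketch<uint32_t>(0xFFFFFFFFu) returns -1: the 32-bit unsigned
// value is sign-extended rather than zero-extended.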
diff --git a/deps/v8/src/execution/simulator.h b/deps/v8/src/execution/simulator.h
index 3b824e7632..5bf9d4612e 100644
--- a/deps/v8/src/execution/simulator.h
+++ b/deps/v8/src/execution/simulator.h
@@ -24,6 +24,8 @@
#include "src/execution/mips/simulator-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/execution/mips64/simulator-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/execution/loong64/simulator-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/execution/s390/simulator-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/execution/thread-local-top.h b/deps/v8/src/execution/thread-local-top.h
index f903747aeb..236beda8a0 100644
--- a/deps/v8/src/execution/thread-local-top.h
+++ b/deps/v8/src/execution/thread-local-top.h
@@ -5,6 +5,9 @@
#ifndef V8_EXECUTION_THREAD_LOCAL_TOP_H_
#define V8_EXECUTION_THREAD_LOCAL_TOP_H_
+#include "include/v8-callbacks.h"
+#include "include/v8-exception.h"
+#include "include/v8-unwinder.h"
#include "src/common/globals.h"
#include "src/execution/thread-id.h"
#include "src/objects/contexts.h"
@@ -63,8 +66,10 @@ class ThreadLocalTop {
// corresponds to the place on the JS stack where the C++ handler
// would have been if the stack were not separate.
Address try_catch_handler_address() {
- return reinterpret_cast<Address>(
- v8::TryCatch::JSStackComparableAddress(try_catch_handler_));
+ if (try_catch_handler_) {
+ return try_catch_handler_->JSStackComparableAddressPrivate();
+ }
+ return kNullAddress;
}
// Call depth represents nested v8 api calls. Instead of storing the nesting
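
The `try_catch_handler_address()` change swaps a static helper for a null-checked call on the handler itself; a generic sketch of that guard with stand-in types (not the V8 classes):

```cpp
#include <cstdint>

using Address = uintptr_t;
constexpr Address kNullAddress = 0;

// Stand-in for a registered try/catch handler exposing a comparable address.
struct TryCatchStub {
  Address js_stack_comparable_address = 0x1000;
  Address JSStackComparableAddressPrivate() const {
    return js_stack_comparable_address;
  }
};

Address TryCatchHandlerAddress(const TryCatchStub* handler) {
  // With no handler installed, report kNullAddress instead of dereferencing.
  if (handler) return handler->JSStackComparableAddressPrivate();
  return kNullAddress;
}

int main() {
  TryCatchStub handler;
  return (TryCatchHandlerAddress(&handler) != kNullAddress &&
          TryCatchHandlerAddress(nullptr) == kNullAddress)
             ? 0
             : 1;
}
```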
diff --git a/deps/v8/src/execution/v8threads.cc b/deps/v8/src/execution/v8threads.cc
index 06575e9c64..3138823f7b 100644
--- a/deps/v8/src/execution/v8threads.cc
+++ b/deps/v8/src/execution/v8threads.cc
@@ -4,6 +4,7 @@
#include "src/execution/v8threads.h"
+#include "include/v8-locker.h"
#include "src/api/api.h"
#include "src/debug/debug.h"
#include "src/execution/execution.h"
diff --git a/deps/v8/src/execution/vm-state.h b/deps/v8/src/execution/vm-state.h
index 9621bee421..d903b222ee 100644
--- a/deps/v8/src/execution/vm-state.h
+++ b/deps/v8/src/execution/vm-state.h
@@ -5,7 +5,7 @@
#ifndef V8_EXECUTION_VM_STATE_H_
#define V8_EXECUTION_VM_STATE_H_
-#include "include/v8.h"
+#include "include/v8-unwinder.h"
#include "src/common/globals.h"
#include "src/logging/counters-scopes.h"
diff --git a/deps/v8/src/extensions/cputracemark-extension.cc b/deps/v8/src/extensions/cputracemark-extension.cc
index 029ad0f3cb..881ca3b1dc 100644
--- a/deps/v8/src/extensions/cputracemark-extension.cc
+++ b/deps/v8/src/extensions/cputracemark-extension.cc
@@ -4,6 +4,9 @@
#include "src/extensions/cputracemark-extension.h"
+#include "include/v8-isolate.h"
+#include "include/v8-template.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/cputracemark-extension.h b/deps/v8/src/extensions/cputracemark-extension.h
index 362bdcebd3..4eca092d4b 100644
--- a/deps/v8/src/extensions/cputracemark-extension.h
+++ b/deps/v8/src/extensions/cputracemark-extension.h
@@ -5,10 +5,14 @@
#ifndef V8_EXTENSIONS_CPUTRACEMARK_EXTENSION_H_
#define V8_EXTENSIONS_CPUTRACEMARK_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
#include "src/base/strings.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class CpuTraceMarkExtension : public v8::Extension {
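
Each extension header in this series replaces the monolithic include/v8.h with the narrow include/v8-extension.h plus a forward declaration of `FunctionCallbackInfo`, which only appears by reference in member declarations. A self-contained sketch of why that works (hypothetical types, not V8's headers):

```cpp
// A class template that is only named in signatures does not need its
// definition in the header that declares those signatures.
template <typename T>
class CallbackInfo;  // forward declaration

class ExtensionSketch {
 public:
  virtual ~ExtensionSketch() = default;
  // An incomplete type is fine for a by-reference parameter in a declaration.
  virtual void Handle(const CallbackInfo<int>& info) = 0;
};

// The full definition is only needed where members are actually used,
// typically in the .cc file.
template <typename T>
class CallbackInfo {
 public:
  explicit CallbackInfo(T value) : value_(value) {}
  T value() const { return value_; }

 private:
  T value_;
};

class RecordingExtension final : public ExtensionSketch {
 public:
  void Handle(const CallbackInfo<int>& info) override { last_ = info.value(); }
  int last_ = 0;
};

int main() {
  RecordingExtension ext;
  ext.Handle(CallbackInfo<int>(42));
  return ext.last_ == 42 ? 0 : 1;
}
```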
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 755023d8d6..dab8c224c4 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -4,6 +4,7 @@
#include "src/extensions/externalize-string-extension.h"
+#include "include/v8-template.h"
#include "src/api/api-inl.h"
#include "src/base/strings.h"
#include "src/execution/isolate.h"
diff --git a/deps/v8/src/extensions/externalize-string-extension.h b/deps/v8/src/extensions/externalize-string-extension.h
index 8d08a7474a..8fce62191d 100644
--- a/deps/v8/src/extensions/externalize-string-extension.h
+++ b/deps/v8/src/extensions/externalize-string-extension.h
@@ -5,9 +5,13 @@
#ifndef V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
#define V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class ExternalizeStringExtension : public v8::Extension {
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index 6f1c601d8d..cda90bd507 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -4,7 +4,11 @@
#include "src/extensions/gc-extension.h"
-#include "include/v8.h"
+#include "include/v8-isolate.h"
+#include "include/v8-object.h"
+#include "include/v8-persistent-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-template.h"
#include "src/base/platform/platform.h"
#include "src/execution/isolate.h"
#include "src/heap/heap.h"
diff --git a/deps/v8/src/extensions/gc-extension.h b/deps/v8/src/extensions/gc-extension.h
index c5750c5e80..f38a946b9f 100644
--- a/deps/v8/src/extensions/gc-extension.h
+++ b/deps/v8/src/extensions/gc-extension.h
@@ -5,10 +5,15 @@
#ifndef V8_EXTENSIONS_GC_EXTENSION_H_
#define V8_EXTENSIONS_GC_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
+#include "include/v8-local-handle.h"
#include "src/base/strings.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
// Provides garbage collection on invoking |fun_name|(options), where
diff --git a/deps/v8/src/extensions/ignition-statistics-extension.cc b/deps/v8/src/extensions/ignition-statistics-extension.cc
index 93ceeeeddf..454a85f50a 100644
--- a/deps/v8/src/extensions/ignition-statistics-extension.cc
+++ b/deps/v8/src/extensions/ignition-statistics-extension.cc
@@ -4,6 +4,8 @@
#include "src/extensions/ignition-statistics-extension.h"
+#include "include/v8-template.h"
+#include "src/api/api-inl.h"
#include "src/base/logging.h"
#include "src/execution/isolate.h"
#include "src/interpreter/bytecodes.h"
@@ -27,9 +29,10 @@ const char* const IgnitionStatisticsExtension::kSource =
void IgnitionStatisticsExtension::GetIgnitionDispatchCounters(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- args.GetReturnValue().Set(reinterpret_cast<Isolate*>(args.GetIsolate())
- ->interpreter()
- ->GetDispatchCountersObject());
+ args.GetReturnValue().Set(
+ Utils::ToLocal(reinterpret_cast<Isolate*>(args.GetIsolate())
+ ->interpreter()
+ ->GetDispatchCountersObject()));
}
} // namespace internal
diff --git a/deps/v8/src/extensions/ignition-statistics-extension.h b/deps/v8/src/extensions/ignition-statistics-extension.h
index fee55f6128..deffe4c915 100644
--- a/deps/v8/src/extensions/ignition-statistics-extension.h
+++ b/deps/v8/src/extensions/ignition-statistics-extension.h
@@ -5,9 +5,13 @@
#ifndef V8_EXTENSIONS_IGNITION_STATISTICS_EXTENSION_H_
#define V8_EXTENSIONS_IGNITION_STATISTICS_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class IgnitionStatisticsExtension : public v8::Extension {
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 1911dfc39e..976a97ad73 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -4,6 +4,7 @@
#include "src/extensions/statistics-extension.h"
+#include "include/v8-template.h"
#include "src/common/assert-scope.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
diff --git a/deps/v8/src/extensions/statistics-extension.h b/deps/v8/src/extensions/statistics-extension.h
index 4c53cbfdea..f2b0256ee2 100644
--- a/deps/v8/src/extensions/statistics-extension.h
+++ b/deps/v8/src/extensions/statistics-extension.h
@@ -5,9 +5,13 @@
#ifndef V8_EXTENSIONS_STATISTICS_EXTENSION_H_
#define V8_EXTENSIONS_STATISTICS_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class StatisticsExtension : public v8::Extension {
diff --git a/deps/v8/src/extensions/trigger-failure-extension.cc b/deps/v8/src/extensions/trigger-failure-extension.cc
index 44c07fbc00..2c66d036a2 100644
--- a/deps/v8/src/extensions/trigger-failure-extension.cc
+++ b/deps/v8/src/extensions/trigger-failure-extension.cc
@@ -4,6 +4,7 @@
#include "src/extensions/trigger-failure-extension.h"
+#include "include/v8-template.h"
#include "src/base/logging.h"
#include "src/common/checks.h"
diff --git a/deps/v8/src/extensions/trigger-failure-extension.h b/deps/v8/src/extensions/trigger-failure-extension.h
index e2cfac1eb3..22039ccb27 100644
--- a/deps/v8/src/extensions/trigger-failure-extension.h
+++ b/deps/v8/src/extensions/trigger-failure-extension.h
@@ -5,9 +5,13 @@
#ifndef V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
#define V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class TriggerFailureExtension : public v8::Extension {
diff --git a/deps/v8/src/extensions/vtunedomain-support-extension.cc b/deps/v8/src/extensions/vtunedomain-support-extension.cc
index 9a7715bb23..fcf2aa6961 100644
--- a/deps/v8/src/extensions/vtunedomain-support-extension.cc
+++ b/deps/v8/src/extensions/vtunedomain-support-extension.cc
@@ -3,9 +3,13 @@
// found in the LICENSE file.
#include "src/extensions/vtunedomain-support-extension.h"
+
#include <string>
#include <vector>
+#include "include/v8-isolate.h"
+#include "include/v8-template.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/vtunedomain-support-extension.h b/deps/v8/src/extensions/vtunedomain-support-extension.h
index 4640d0dfa5..cccfd74223 100644
--- a/deps/v8/src/extensions/vtunedomain-support-extension.h
+++ b/deps/v8/src/extensions/vtunedomain-support-extension.h
@@ -5,7 +5,7 @@
#ifndef V8_EXTENSIONS_VTUNEDOMAIN_SUPPORT_EXTENSION_H_
#define V8_EXTENSIONS_VTUNEDOMAIN_SUPPORT_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
#include "src/base/strings.h"
#include "src/base/vector.h"
#include "src/third_party/vtune/vtuneapi.h"
@@ -19,6 +19,10 @@
#define TASK_END_FAILED 1 << 6
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class VTuneDomainSupportExtension : public v8::Extension {
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index 312d17b52f..ca8ed311a8 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -175,6 +175,20 @@ struct MaybeBoolFlag {
#define V8_HEAP_SANDBOX_BOOL false
#endif
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+#define V8_VIRTUAL_MEMORY_CAGE_BOOL true
+#else
+#define V8_VIRTUAL_MEMORY_CAGE_BOOL false
+#endif
+
+// D8's MultiMappedAllocator is only available on Linux, and only if the virtual
+// memory cage is not enabled.
+#if V8_OS_LINUX && !V8_VIRTUAL_MEMORY_CAGE_BOOL
+#define MULTI_MAPPED_ALLOCATOR_AVAILABLE true
+#else
+#define MULTI_MAPPED_ALLOCATOR_AVAILABLE false
+#endif
+
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL true
#else
@@ -183,7 +197,7 @@ struct MaybeBoolFlag {
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64
#define ENABLE_SPARKPLUG true
#else
// TODO(v8:11421): Enable Sparkplug for other architectures
@@ -299,10 +313,8 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
#define HARMONY_STAGED(V) \
HARMONY_STAGED_BASE(V) \
V(harmony_intl_best_fit_matcher, "Intl BestFitMatcher") \
- V(harmony_intl_displaynames_v2, "Intl.DisplayNames v2") \
- V(harmony_intl_locale_info, "Intl locale info") \
- V(harmony_intl_more_timezone, \
- "Extend Intl.DateTimeFormat timeZoneName Option")
+  V(harmony_intl_enumeration, "Intl Enumeration API")            \
+ V(harmony_intl_locale_info, "Intl locale info")
#else
#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
#endif
@@ -319,10 +331,13 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
V(harmony_class_static_blocks, "harmony static initializer blocks")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_SHIPPING(V) \
- HARMONY_SHIPPING_BASE(V) \
- V(harmony_intl_dateformat_day_period, \
- "Add dayPeriod option to DateTimeFormat")
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(harmony_intl_dateformat_day_period, \
+ "Add dayPeriod option to DateTimeFormat") \
+ V(harmony_intl_displaynames_v2, "Intl.DisplayNames v2") \
+ V(harmony_intl_more_timezone, \
+ "Extend Intl.DateTimeFormat timeZoneName Option")
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
#endif
@@ -490,6 +505,7 @@ DEFINE_BOOL(future, FUTURE_BOOL,
DEFINE_WEAK_IMPLICATION(future, turbo_inline_js_wasm_calls)
#if ENABLE_SPARKPLUG
DEFINE_WEAK_IMPLICATION(future, sparkplug)
+DEFINE_WEAK_IMPLICATION(future, flush_baseline_code)
#endif
#if V8_SHORT_BUILTIN_CALLS
DEFINE_WEAK_IMPLICATION(future, short_builtin_calls)
@@ -519,9 +535,9 @@ DEFINE_NEG_IMPLICATION(jitless, interpreted_frames_native_stack)
DEFINE_BOOL(assert_types, false,
"generate runtime type assertions to test the typer")
-DEFINE_BOOL(trace_code_dependencies, false, "trace code dependencies")
+DEFINE_BOOL(trace_compilation_dependencies, false, "trace code dependencies")
// Depend on --trace-deopt-verbose for reporting dependency invalidations.
-DEFINE_IMPLICATION(trace_code_dependencies, trace_deopt_verbose)
+DEFINE_IMPLICATION(trace_compilation_dependencies, trace_deopt_verbose)
#ifdef V8_ALLOCATION_SITE_TRACKING
#define V8_ALLOCATION_SITE_TRACKING_BOOL true
@@ -567,8 +583,17 @@ DEFINE_BOOL_READONLY(enable_sealed_frozen_elements_kind, true,
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_BOOL_READONLY(string_slices, true, "use string slices")
+DEFINE_INT(ticks_before_optimization, 3,
+ "the number of times we have to go through the interrupt budget "
+ "before considering this function for optimization")
+DEFINE_INT(bytecode_size_allowance_per_tick, 1100,
+ "increases the number of ticks required for optimization by "
+ "bytecode.length/X")
DEFINE_INT(interrupt_budget, 132 * KB,
"interrupt budget which should be used for the profiler counter")
+DEFINE_INT(
+ max_bytecode_size_for_early_opt, 81,
+ "Maximum bytecode length for a function to be optimized on the first tick")
// Flags for inline caching and feedback vectors.
DEFINE_BOOL(use_ic, true, "use inline caching")
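
Taking the help strings of the three new tick flags at face value, the optimization trigger can be approximated as below; this is a back-of-the-envelope sketch based only on those descriptions, not the runtime profiler's actual code:

```cpp
#include <cstdio>

// Defaults copied from the flag definitions above.
constexpr int kTicksBeforeOptimization = 3;
constexpr int kBytecodeSizeAllowancePerTick = 1100;
constexpr int kMaxBytecodeSizeForEarlyOpt = 81;

int TicksRequiredSketch(int bytecode_length) {
  // Tiny functions may be optimized on the very first tick.
  if (bytecode_length <= kMaxBytecodeSizeForEarlyOpt) return 1;
  // Larger functions need extra ticks proportional to their bytecode size.
  return kTicksBeforeOptimization +
         bytecode_length / kBytecodeSizeAllowancePerTick;
}

int main() {
  // A 2200-byte function: 3 + 2200 / 1100 = 5 profiler ticks.
  std::printf("%d\n", TicksRequiredSketch(2200));
  // An 80-byte function qualifies on the first tick.
  std::printf("%d\n", TicksRequiredSketch(80));
}
```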
@@ -695,19 +720,21 @@ DEFINE_INT(concurrent_recompilation_queue_length, 8,
"the length of the concurrent compilation queue")
DEFINE_INT(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
-DEFINE_BOOL(block_concurrent_recompilation, false,
- "block queued jobs until released")
DEFINE_BOOL(concurrent_inlining, false,
"run optimizing compiler's inlining phase on a separate thread")
-DEFINE_BOOL(stress_concurrent_inlining, false,
- "makes concurrent inlining more likely to trigger in tests")
+DEFINE_BOOL(
+ stress_concurrent_inlining, false,
+ "create additional concurrent optimization jobs but throw away result")
DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_inlining)
DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation)
DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget,
15 * KB)
+DEFINE_BOOL(stress_concurrent_inlining_attach_code, false,
+ "create additional concurrent optimization jobs")
+DEFINE_IMPLICATION(stress_concurrent_inlining_attach_code,
+ stress_concurrent_inlining)
DEFINE_INT(max_serializer_nesting, 25,
"maximum levels for nesting child serializers")
-DEFINE_WEAK_IMPLICATION(future, concurrent_inlining)
DEFINE_BOOL(trace_heap_broker_verbose, false,
"trace the heap broker verbosely (all reports)")
DEFINE_BOOL(trace_heap_broker_memory, false,
@@ -882,15 +909,6 @@ DEFINE_BOOL(optimize_for_size, false,
"speed")
DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
-#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
-#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS false
-#else
-#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS true
-#endif
-DEFINE_BOOL(untrusted_code_mitigations, V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS,
- "Enable mitigations for executing untrusted code")
-#undef V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS
-
// Flags for WebAssembly.
#if V8_ENABLE_WEBASSEMBLY
@@ -988,7 +1006,6 @@ DEFINE_STRING(dump_wasm_module_path, nullptr,
FOREACH_WASM_FEATURE_FLAG(DECL_WASM_FLAG)
#undef DECL_WASM_FLAG
-DEFINE_IMPLICATION(experimental_wasm_gc_experiments, experimental_wasm_gc)
DEFINE_IMPLICATION(experimental_wasm_gc, experimental_wasm_typed_funcref)
DEFINE_IMPLICATION(experimental_wasm_typed_funcref, experimental_wasm_reftypes)
@@ -1015,6 +1032,9 @@ DEFINE_NEG_NEG_IMPLICATION(wasm_bounds_checks, wasm_enforce_bounds_checks)
DEFINE_BOOL(wasm_math_intrinsics, true,
"intrinsify some Math imports into wasm")
+DEFINE_BOOL(
+ wasm_inlining, false,
+ "enable inlining of wasm functions into wasm functions (experimental)")
DEFINE_BOOL(wasm_loop_unrolling, true,
"enable loop unrolling for wasm functions")
DEFINE_BOOL(wasm_fuzzer_gen_test, false,
@@ -1580,8 +1600,9 @@ DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator")
DEFINE_BOOL(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions")
-#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \
- defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_RISCV64)
+#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_RISCV64) || \
+ defined(V8_TARGET_ARCH_LOONG64)
DEFINE_INT(sim_stack_alignment, 16,
"Stack alignment in bytes in simulator. This must be a power of two "
"and it must be at least 16. 16 is default.")
@@ -1796,7 +1817,7 @@ DEFINE_BOOL(mock_arraybuffer_allocator, false,
DEFINE_SIZE_T(mock_arraybuffer_allocator_limit, 0,
"Memory limit for mock ArrayBuffer allocator used to simulate "
"OOM for testing.")
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
DEFINE_BOOL(multi_mapped_mock_allocator, false,
"Use a multi-mapped mock ArrayBuffer allocator for testing.")
#endif
@@ -2118,6 +2139,7 @@ DEFINE_NEG_IMPLICATION(single_threaded_gc, stress_concurrent_allocation)
DEFINE_BOOL(verify_predictable, false,
"this mode is used for checking that V8 behaves predictably")
+DEFINE_IMPLICATION(verify_predictable, predictable)
DEFINE_INT(dump_allocations_digest_at_alloc, -1,
"dump allocations digest each n-th allocation")
diff --git a/deps/v8/src/handles/DIR_METADATA b/deps/v8/src/handles/DIR_METADATA
index ff55846b31..af999da1f2 100644
--- a/deps/v8/src/handles/DIR_METADATA
+++ b/deps/v8/src/handles/DIR_METADATA
@@ -7,5 +7,5 @@
# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
monorail {
- component: "Blink>JavaScript>GC"
-} \ No newline at end of file
+ component: "Blink>JavaScript>GarbageCollection"
+}
diff --git a/deps/v8/src/handles/global-handles.cc b/deps/v8/src/handles/global-handles.cc
index 55230a6d0b..d8d5016667 100644
--- a/deps/v8/src/handles/global-handles.cc
+++ b/deps/v8/src/handles/global-handles.cc
@@ -8,7 +8,7 @@
#include <cstdint>
#include <map>
-#include "include/v8.h"
+#include "include/v8-traced-handle.h"
#include "src/api/api-inl.h"
#include "src/base/compiler-specific.h"
#include "src/base/sanitizer/asan.h"
diff --git a/deps/v8/src/handles/global-handles.h b/deps/v8/src/handles/global-handles.h
index 237cedbbb5..d7f68e5b55 100644
--- a/deps/v8/src/handles/global-handles.h
+++ b/deps/v8/src/handles/global-handles.h
@@ -10,8 +10,9 @@
#include <utility>
#include <vector>
+#include "include/v8-callbacks.h"
+#include "include/v8-persistent-handle.h"
#include "include/v8-profiler.h"
-#include "include/v8.h"
#include "src/handles/handles.h"
#include "src/heap/heap.h"
#include "src/objects/objects.h"
diff --git a/deps/v8/src/handles/handles.h b/deps/v8/src/handles/handles.h
index 929cba0bc7..166b7ee4ab 100644
--- a/deps/v8/src/handles/handles.h
+++ b/deps/v8/src/handles/handles.h
@@ -7,7 +7,6 @@
#include <type_traits>
-#include "include/v8.h"
#include "src/base/functional.h"
#include "src/base/macros.h"
#include "src/common/checks.h"
@@ -15,6 +14,9 @@
#include "src/zone/zone.h"
namespace v8 {
+
+class HandleScope;
+
namespace internal {
// Forward declarations.
diff --git a/deps/v8/src/heap/DIR_METADATA b/deps/v8/src/heap/DIR_METADATA
index ff55846b31..af999da1f2 100644
--- a/deps/v8/src/heap/DIR_METADATA
+++ b/deps/v8/src/heap/DIR_METADATA
@@ -7,5 +7,5 @@
# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
monorail {
- component: "Blink>JavaScript>GC"
-} \ No newline at end of file
+ component: "Blink>JavaScript>GarbageCollection"
+}
diff --git a/deps/v8/src/heap/array-buffer-sweeper.cc b/deps/v8/src/heap/array-buffer-sweeper.cc
index 597e4d0f93..2bdcec0bf7 100644
--- a/deps/v8/src/heap/array-buffer-sweeper.cc
+++ b/deps/v8/src/heap/array-buffer-sweeper.cc
@@ -71,6 +71,7 @@ size_t ArrayBufferList::BytesSlow() {
void ArrayBufferSweeper::EnsureFinished() {
if (!sweeping_in_progress_) return;
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS);
TryAbortResult abort_result =
heap_->isolate()->cancelable_task_manager()->TryAbort(job_->id_);
@@ -112,9 +113,10 @@ void ArrayBufferSweeper::MergeBackExtensionsWhenSwept() {
if (job_->state_ == SweepingState::kDone) {
Merge();
sweeping_in_progress_ = false;
- } else {
- UpdateCountersForConcurrentlySweptExtensions();
}
+ // Update freed counters either way. It is necessary to update the counter
+ // in case sweeping is done to avoid counter overflows.
+ UpdateCountersForConcurrentlySweptExtensions();
}
}
diff --git a/deps/v8/src/heap/base/asm/loong64/push_registers_asm.cc b/deps/v8/src/heap/base/asm/loong64/push_registers_asm.cc
new file mode 100644
index 0000000000..aa8dcd356b
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/loong64/push_registers_asm.cc
@@ -0,0 +1,48 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+asm(".text \n"
+ ".global PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ " addi.d $sp, $sp, -96 \n"
+ " st.d $ra, $sp, 88 \n"
+ " st.d $s8, $sp, 80 \n"
+ " st.d $sp, $sp, 72 \n"
+ " st.d $fp, $sp, 64 \n"
+ " st.d $s7, $sp, 56 \n"
+ " st.d $s6, $sp, 48 \n"
+ " st.d $s5, $sp, 40 \n"
+ " st.d $s4, $sp, 32 \n"
+ " st.d $s3, $sp, 24 \n"
+ " st.d $s2, $sp, 16 \n"
+ " st.d $s1, $sp, 8 \n"
+ " st.d $s0, $sp, 0 \n"
+ // Maintain frame pointer.
+ " addi.d $s8, $sp, 0 \n"
+ // Pass 1st parameter (a0) unchanged (Stack*).
+ // Pass 2nd parameter (a1) unchanged (StackVisitor*).
+ // Save 3rd parameter (a2; IterateStackCallback).
+ " addi.d $a3, $a2, 0 \n"
+ // Call the callback.
+ // Pass 3rd parameter as sp (stack pointer).
+ " addi.d $a2, $sp, 0 \n"
+ " jirl $ra, $a3, 0 \n"
+ // Load return address.
+ " ld.d $ra, $sp, 88 \n"
+ // Restore frame pointer.
+ " ld.d $s8, $sp, 80 \n"
+ // Discard all callee-saved registers.
+ " addi.d $sp, $sp, 96 \n"
+ " jirl $zero, $ra, 0 \n");
diff --git a/deps/v8/src/heap/base/stack.cc b/deps/v8/src/heap/base/stack.cc
index fd5eab4528..8b6713e687 100644
--- a/deps/v8/src/heap/base/stack.cc
+++ b/deps/v8/src/heap/base/stack.cc
@@ -9,6 +9,7 @@
#include "src/base/platform/platform.h"
#include "src/base/sanitizer/asan.h"
#include "src/base/sanitizer/msan.h"
+#include "src/base/sanitizer/tsan.h"
#include "src/heap/cppgc/globals.h"
namespace heap {
@@ -43,6 +44,10 @@ namespace {
// No ASAN support as accessing fake frames otherwise results in
// "stack-use-after-scope" warnings.
DISABLE_ASAN
+// No TSAN support as the stack may not be exclusively owned by the current
+// thread, e.g., for interrupt handling. Atomic reads are not enough as the
+// other thread may use a lock to synchronize the access.
+DISABLE_TSAN
void IterateAsanFakeFrameIfNecessary(StackVisitor* visitor,
void* asan_fake_stack,
const void* stack_start,
@@ -103,6 +108,10 @@ void IterateSafeStackIfNecessary(StackVisitor* visitor) {
V8_NOINLINE
// No ASAN support as method accesses redzones while walking the stack.
DISABLE_ASAN
+// No TSAN support as the stack may not be exclusively owned by the current
+// thread, e.g., for interrupt handling. Atomic reads are not enough as the
+// other thread may use a lock to synchronize the access.
+DISABLE_TSAN
void IteratePointersImpl(const Stack* stack, StackVisitor* visitor,
intptr_t* stack_end) {
#ifdef V8_USE_ADDRESS_SANITIZER
@@ -133,6 +142,7 @@ void Stack::IteratePointers(StackVisitor* visitor) const {
PushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl);
// No need to deal with callee-saved registers as they will be kept alive by
// the regular conservative stack iteration.
+ // TODO(chromium:1056170): Add support for SIMD and/or filtering.
IterateSafeStackIfNecessary(visitor);
}
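
The new DISABLE_TSAN annotations suppress ThreadSanitizer instrumentation on the stack-walking helpers, since the scanned stack may be shared with another thread. A hypothetical stand-in for what such an annotation expands to (the real macro lives in src/base/sanitizer/tsan.h; the function below is illustrative only):

```cpp
#include <cstdint>

#if defined(__has_feature)
#if __has_feature(thread_sanitizer)
#define DISABLE_TSAN_SKETCH __attribute__((no_sanitize("thread")))
#endif
#endif
#ifndef DISABLE_TSAN_SKETCH
#define DISABLE_TSAN_SKETCH
#endif

// Conservative scanning reads words another thread may own; the race is
// tolerated by design, so instrumentation is switched off for this function.
DISABLE_TSAN_SKETCH
void ScanStackWordsSketch(const intptr_t* begin, const intptr_t* end,
                          void (*visit)(intptr_t)) {
  for (const intptr_t* p = begin; p < end; ++p) visit(*p);
}

int main() {
  intptr_t fake_stack[4] = {1, 2, 3, 4};
  static intptr_t sum = 0;
  ScanStackWordsSketch(fake_stack, fake_stack + 4,
                       [](intptr_t word) { sum += word; });
  return sum == 10 ? 0 : 1;
}
```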
diff --git a/deps/v8/src/heap/basic-memory-chunk.cc b/deps/v8/src/heap/basic-memory-chunk.cc
index 6fb0467c39..0c7a8170cf 100644
--- a/deps/v8/src/heap/basic-memory-chunk.cc
+++ b/deps/v8/src/heap/basic-memory-chunk.cc
@@ -25,6 +25,26 @@ STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
heap_internals::MemoryChunk::kHeapOffset);
+// static
+constexpr BasicMemoryChunk::MainThreadFlags BasicMemoryChunk::kAllFlagsMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kPointersToHereAreInterestingMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kPointersFromHereAreInterestingMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kEvacuationCandidateMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kIsInYoungGenerationMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags BasicMemoryChunk::kIsLargePageMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kSkipEvacuationSlotsRecordingMask;
+
BasicMemoryChunk::BasicMemoryChunk(size_t size, Address area_start,
Address area_end) {
size_ = size;
@@ -75,13 +95,11 @@ class BasicMemoryChunkValidator {
STATIC_ASSERT(BasicMemoryChunk::kSizeOffset ==
offsetof(BasicMemoryChunk, size_));
STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
- offsetof(BasicMemoryChunk, flags_));
+ offsetof(BasicMemoryChunk, main_thread_flags_));
STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
offsetof(BasicMemoryChunk, heap_));
STATIC_ASSERT(offsetof(BasicMemoryChunk, size_) ==
MemoryChunkLayout::kSizeOffset);
- STATIC_ASSERT(offsetof(BasicMemoryChunk, flags_) ==
- MemoryChunkLayout::kFlagsOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, heap_) ==
MemoryChunkLayout::kHeapOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, area_start_) ==
diff --git a/deps/v8/src/heap/basic-memory-chunk.h b/deps/v8/src/heap/basic-memory-chunk.h
index 993291dc0e..de91e6ea9f 100644
--- a/deps/v8/src/heap/basic-memory-chunk.h
+++ b/deps/v8/src/heap/basic-memory-chunk.h
@@ -9,6 +9,7 @@
#include <unordered_map>
#include "src/base/atomic-utils.h"
+#include "src/base/flags.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/marking.h"
@@ -30,7 +31,7 @@ class BasicMemoryChunk {
}
};
- enum Flag {
+ enum Flag : uintptr_t {
NO_FLAGS = 0u,
IS_EXECUTABLE = 1u << 0,
POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
@@ -44,12 +45,6 @@ class BasicMemoryChunk {
EVACUATION_CANDIDATE = 1u << 6,
NEVER_EVACUATE = 1u << 7,
- // Large objects can have a progress bar in their page header. These object
- // are scanned in increments and will be kept black while being scanned.
- // Even if the mutator writes to them they will be kept black and a white
- // to grey transition is performed in the value.
- HAS_PROGRESS_BAR = 1u << 8,
-
// |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
// from new to old space during evacuation.
PAGE_NEW_OLD_PROMOTION = 1u << 9,
@@ -111,6 +106,28 @@ class BasicMemoryChunk {
IN_SHARED_HEAP = 1u << 23,
};
+ using MainThreadFlags = base::Flags<Flag, uintptr_t>;
+
+ static constexpr MainThreadFlags kAllFlagsMask = ~MainThreadFlags(NO_FLAGS);
+
+ static constexpr MainThreadFlags kPointersToHereAreInterestingMask =
+ POINTERS_TO_HERE_ARE_INTERESTING;
+
+ static constexpr MainThreadFlags kPointersFromHereAreInterestingMask =
+ POINTERS_FROM_HERE_ARE_INTERESTING;
+
+ static constexpr MainThreadFlags kEvacuationCandidateMask =
+ EVACUATION_CANDIDATE;
+
+ static constexpr MainThreadFlags kIsInYoungGenerationMask =
+ MainThreadFlags(FROM_PAGE) | MainThreadFlags(TO_PAGE);
+
+ static constexpr MainThreadFlags kIsLargePageMask = LARGE_PAGE;
+
+ static constexpr MainThreadFlags kSkipEvacuationSlotsRecordingMask =
+ MainThreadFlags(kEvacuationCandidateMask) |
+ MainThreadFlags(kIsInYoungGenerationMask);
+
static const intptr_t kAlignment =
(static_cast<uintptr_t>(1) << kPageSizeBits);
@@ -157,54 +174,20 @@ class BasicMemoryChunk {
void set_owner(BaseSpace* space) { owner_ = space; }
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- void SetFlag(Flag flag) {
- if (access_mode == AccessMode::NON_ATOMIC) {
- flags_ |= flag;
- } else {
- base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
- }
- }
-
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool IsFlagSet(Flag flag) const {
- return (GetFlags<access_mode>() & flag) != 0;
+ void SetFlag(Flag flag) { main_thread_flags_ |= flag; }
+ bool IsFlagSet(Flag flag) const { return main_thread_flags_ & flag; }
+ void ClearFlag(Flag flag) {
+ main_thread_flags_ = main_thread_flags_.without(flag);
}
-
- void ClearFlag(Flag flag) { flags_ &= ~flag; }
-
- // Set or clear multiple flags at a time. The flags in the mask are set to
- // the value in "flags", the rest retain the current value in |flags_|.
- void SetFlags(uintptr_t flags, uintptr_t mask) {
- flags_ = (flags_ & ~mask) | (flags & mask);
+ void ClearFlags(MainThreadFlags flags) { main_thread_flags_ &= ~flags; }
+  // Set or clear multiple flags at a time. `mask` indicates which flags
+  // should be replaced with new `flags`.
+ void SetFlags(MainThreadFlags flags, MainThreadFlags mask) {
+ main_thread_flags_ = (main_thread_flags_ & ~mask) | (flags & mask);
}
// Return all current flags.
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- uintptr_t GetFlags() const {
- if (access_mode == AccessMode::NON_ATOMIC) {
- return flags_;
- } else {
- return base::AsAtomicWord::Relaxed_Load(&flags_);
- }
- }
-
- using Flags = uintptr_t;
-
- static const Flags kPointersToHereAreInterestingMask =
- POINTERS_TO_HERE_ARE_INTERESTING;
-
- static const Flags kPointersFromHereAreInterestingMask =
- POINTERS_FROM_HERE_ARE_INTERESTING;
-
- static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
-
- static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
-
- static const Flags kIsLargePageMask = LARGE_PAGE;
-
- static const Flags kSkipEvacuationSlotsRecordingMask =
- kEvacuationCandidateMask | kIsInYoungGenerationMask;
+ MainThreadFlags GetFlags() const { return main_thread_flags_; }
private:
bool InReadOnlySpaceRaw() const { return IsFlagSet(READ_ONLY_HEAP); }
@@ -227,16 +210,13 @@ class BasicMemoryChunk {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
}
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool IsEvacuationCandidate() {
- DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
- IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
- return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
+ bool IsEvacuationCandidate() const {
+ DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
+ return IsFlagSet(EVACUATION_CANDIDATE);
}
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool ShouldSkipEvacuationSlotRecording() {
- uintptr_t flags = GetFlags<access_mode>();
+ bool ShouldSkipEvacuationSlotRecording() const {
+ MainThreadFlags flags = GetFlags();
return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
((flags & COMPACTION_WAS_ABORTED) == 0);
}
@@ -360,7 +340,9 @@ class BasicMemoryChunk {
// Overall size of the chunk, including the header and guards.
size_t size_;
- uintptr_t flags_ = NO_FLAGS;
+ // Flags that are only mutable from the main thread when no concurrent
+ // component (e.g. marker, sweeper) is running.
+ MainThreadFlags main_thread_flags_{NO_FLAGS};
// TODO(v8:7464): Find a way to remove this.
// This goes against the spirit for the BasicMemoryChunk, but until C++14/17
@@ -399,6 +381,8 @@ class BasicMemoryChunk {
friend class PagedSpace;
};
+DEFINE_OPERATORS_FOR_FLAGS(BasicMemoryChunk::MainThreadFlags)
+
STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc-js/DEPS b/deps/v8/src/heap/cppgc-js/DEPS
new file mode 100644
index 0000000000..37049928d5
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+include/cppgc",
+]
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index 8c5813867f..c21d1ceb50 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -10,8 +10,8 @@
#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/platform.h"
+#include "include/v8-local-handle.h"
#include "include/v8-platform.h"
-#include "include/v8.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
@@ -217,6 +217,14 @@ void UnifiedHeapMarker::AddObject(void* object) {
cppgc::internal::HeapObjectHeader::FromObject(object));
}
+void FatalOutOfMemoryHandlerImpl(const std::string& reason,
+ const SourceLocation&, HeapBase* heap) {
+ FatalProcessOutOfMemory(
+ reinterpret_cast<v8::internal::Isolate*>(
+ static_cast<v8::internal::CppHeap*>(heap)->isolate()),
+ reason.c_str());
+}
+
} // namespace
void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
@@ -355,6 +363,7 @@ void CppHeap::AttachIsolate(Isolate* isolate) {
wrapper_descriptor_);
SetMetricRecorder(std::make_unique<MetricRecorderAdapter>(*this));
SetStackStart(base::Stack::GetStackStart());
+ oom_handler().SetCustomHandler(&FatalOutOfMemoryHandlerImpl);
no_gc_scope_--;
}
@@ -376,6 +385,7 @@ void CppHeap::DetachIsolate() {
isolate_ = nullptr;
// Any future garbage collections will ignore the V8->C++ references.
isolate()->SetEmbedderHeapTracer(nullptr);
+ oom_handler().SetCustomHandler(nullptr);
// Enter no GC scope.
no_gc_scope_++;
}
@@ -483,13 +493,14 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
// The allocated bytes counter in v8 was reset to the current marked bytes, so
// any pending allocated bytes updates should be discarded.
buffered_allocated_bytes_ = 0;
- ExecutePreFinalizers();
- // TODO(chromium:1056170): replace build flag with dedicated flag.
-#if DEBUG
+ const size_t bytes_allocated_in_prefinalizers = ExecutePreFinalizers();
+#if CPPGC_VERIFY_HEAP
UnifiedHeapMarkingVerifier verifier(*this);
- verifier.Run(stack_state_of_prev_gc(), stack_end_of_current_gc(),
- stats_collector()->marked_bytes());
-#endif
+ verifier.Run(
+ stack_state_of_prev_gc(), stack_end_of_current_gc(),
+ stats_collector()->marked_bytes() + bytes_allocated_in_prefinalizers);
+#endif // CPPGC_VERIFY_HEAP
+ USE(bytes_allocated_in_prefinalizers);
{
cppgc::subtle::NoGarbageCollectionScope no_gc(*this);
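
The cpp-heap hunks install a custom fatal OOM callback in AttachIsolate and clear it again in DetachIsolate. A hypothetical sketch of that settable-handler shape (stand-in names, not cppgc's API):

```cpp
#include <cstdio>
#include <cstdlib>

class FatalOomHandlerSketch {
 public:
  using Callback = void (*)(const char* reason);

  // Installed while an embedder is attached, cleared again on detach.
  void SetCustomHandler(Callback callback) { custom_ = callback; }

  [[noreturn]] void Trigger(const char* reason) {
    if (custom_ != nullptr) custom_(reason);  // embedder-specific reporting
    std::fprintf(stderr, "Fatal out of memory: %s\n", reason);
    std::abort();  // the default path never returns
  }

 private:
  Callback custom_ = nullptr;
};

void ReportViaEmbedder(const char* reason) {
  std::fprintf(stderr, "[embedder] %s\n", reason);
}

int main() {
  FatalOomHandlerSketch handler;
  handler.SetCustomHandler(&ReportViaEmbedder);  // mirrors AttachIsolate
  handler.SetCustomHandler(nullptr);             // mirrors DetachIsolate
  return 0;  // Trigger() is deliberately not called in this demo
}
```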
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.h b/deps/v8/src/heap/cppgc-js/cpp-heap.h
index 8e4c047d1c..a2d11bcd39 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.h
@@ -10,8 +10,10 @@ static_assert(
false, "V8 targets can not be built with cppgc_is_standalone set to true.");
#endif
+#include "include/v8-callbacks.h"
#include "include/v8-cppgc.h"
-#include "include/v8.h"
+#include "include/v8-embedder-heap.h"
+#include "include/v8-metrics.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/stats-collector.h"
diff --git a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
index dc55753ff6..9b20b5c0a7 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -264,6 +264,10 @@ class State final : public StateBase {
ephemeron_edges_.insert(&value);
}
+ void AddEagerEphemeronEdge(const void* value, cppgc::TraceCallback callback) {
+ eager_ephemeron_edges_.insert({value, callback});
+ }
+
template <typename Callback>
void ForAllEphemeronEdges(Callback callback) {
for (const HeapObjectHeader* value : ephemeron_edges_) {
@@ -271,10 +275,20 @@ class State final : public StateBase {
}
}
+ template <typename Callback>
+ void ForAllEagerEphemeronEdges(Callback callback) {
+ for (const auto& pair : eager_ephemeron_edges_) {
+ callback(pair.first, pair.second);
+ }
+ }
+
private:
bool is_weak_container_ = false;
// Values that are held alive through ephemerons by this particular key.
std::unordered_set<const HeapObjectHeader*> ephemeron_edges_;
+ // Values that are eagerly traced and held alive through ephemerons by this
+ // particular key.
+ std::unordered_map<const void*, cppgc::TraceCallback> eager_ephemeron_edges_;
};
// Root states are similar to regular states with the difference that they are
@@ -404,6 +418,9 @@ class CppGraphBuilderImpl final {
void VisitForVisibility(State& parent, const TracedReferenceBase&);
void VisitEphemeronForVisibility(const HeapObjectHeader& key,
const HeapObjectHeader& value);
+ void VisitEphemeronWithNonGarbageCollectedValueForVisibility(
+ const HeapObjectHeader& key, const void* value,
+ cppgc::TraceDescriptor value_desc);
void VisitWeakContainerForVisibility(const HeapObjectHeader&);
void VisitRootForGraphBuilding(RootState&, const HeapObjectHeader&,
const cppgc::SourceLocation&);
@@ -421,7 +438,7 @@ class CppGraphBuilderImpl final {
}
void AddEdge(State& parent, const HeapObjectHeader& header,
- const std::string& edge_name = {}) {
+ const std::string& edge_name) {
DCHECK(parent.IsVisibleNotDependent());
auto& current = states_.GetExistingState(header);
if (!current.IsVisibleNotDependent()) return;
@@ -443,7 +460,8 @@ class CppGraphBuilderImpl final {
}
}
- void AddEdge(State& parent, const TracedReferenceBase& ref) {
+ void AddEdge(State& parent, const TracedReferenceBase& ref,
+ const std::string& edge_name) {
DCHECK(parent.IsVisibleNotDependent());
v8::Local<v8::Value> v8_value = ref.Get(cpp_heap_.isolate());
if (!v8_value.IsEmpty()) {
@@ -451,12 +469,19 @@ class CppGraphBuilderImpl final {
parent.set_node(AddNode(*parent.header()));
}
auto* v8_node = graph_.V8Node(v8_value);
- graph_.AddEdge(parent.get_node(), v8_node);
+ if (!edge_name.empty()) {
+ graph_.AddEdge(parent.get_node(), v8_node,
+ parent.get_node()->InternalizeEdgeName(edge_name));
+ } else {
+ graph_.AddEdge(parent.get_node(), v8_node);
+ }
// References that have a class id set may have their internal fields
// pointing back to the object. Set up a wrapper node for the graph so
// that the snapshot generator can merge the nodes appropriately.
- if (!ref.WrapperClassId()) return;
+ // Even with a set class id, do not set up a wrapper node when the edge
+ // has a specific name.
+ if (!ref.WrapperClassId() || !edge_name.empty()) return;
void* back_reference_object = ExtractEmbedderDataBackref(
reinterpret_cast<v8::internal::Isolate*>(cpp_heap_.isolate()),
@@ -598,8 +623,18 @@ class WeakVisitor : public JSVisitor {
void VisitEphemeron(const void* key, const void* value,
cppgc::TraceDescriptor value_desc) final {
// For ephemerons, the key retains the value.
+ // Key always must be a GarbageCollected object.
+ auto& key_header = HeapObjectHeader::FromObject(key);
+ if (!value_desc.base_object_payload) {
+ // Value does not represent an actual GarbageCollected object but rather
+ // should be traced eagerly.
+ graph_builder_.VisitEphemeronWithNonGarbageCollectedValueForVisibility(
+ key_header, value, value_desc);
+ return;
+ }
+ // Regular path where both key and value are GarbageCollected objects.
graph_builder_.VisitEphemeronForVisibility(
- HeapObjectHeader::FromObject(key), HeapObjectHeader::FromObject(value));
+ key_header, HeapObjectHeader::FromObject(value));
}
protected:
@@ -645,7 +680,7 @@ class GraphBuildingVisitor final : public JSVisitor {
void Visit(const void*, cppgc::TraceDescriptor desc) final {
graph_builder_.AddEdge(
parent_scope_.ParentAsRegularState(),
- HeapObjectHeader::FromObject(desc.base_object_payload));
+ HeapObjectHeader::FromObject(desc.base_object_payload), edge_name_);
}
void VisitWeakContainer(const void* object,
cppgc::TraceDescriptor strong_desc,
@@ -655,7 +690,8 @@ class GraphBuildingVisitor final : public JSVisitor {
// container itself.
graph_builder_.AddEdge(
parent_scope_.ParentAsRegularState(),
- HeapObjectHeader::FromObject(strong_desc.base_object_payload));
+ HeapObjectHeader::FromObject(strong_desc.base_object_payload),
+ edge_name_);
}
void VisitRoot(const void*, cppgc::TraceDescriptor desc,
const cppgc::SourceLocation& loc) final {
@@ -667,12 +703,18 @@ class GraphBuildingVisitor final : public JSVisitor {
const void*, const cppgc::SourceLocation&) final {}
// JS handling.
void Visit(const TracedReferenceBase& ref) final {
- graph_builder_.AddEdge(parent_scope_.ParentAsRegularState(), ref);
+ graph_builder_.AddEdge(parent_scope_.ParentAsRegularState(), ref,
+ edge_name_);
+ }
+
+ void set_edge_name(std::string edge_name) {
+ edge_name_ = std::move(edge_name);
}
private:
CppGraphBuilderImpl& graph_builder_;
const ParentScope& parent_scope_;
+ std::string edge_name_;
};
// Base class for transforming recursion into iteration. Items are processed
@@ -765,6 +807,19 @@ void CppGraphBuilderImpl::VisitForVisibility(State* parent,
}
}
+void CppGraphBuilderImpl::
+ VisitEphemeronWithNonGarbageCollectedValueForVisibility(
+ const HeapObjectHeader& key, const void* value,
+ cppgc::TraceDescriptor value_desc) {
+ auto& key_state = states_.GetOrCreateState(key);
+ // Eagerly trace the value here, effectively marking key as visible and
+ // queuing processing for all reachable values.
+ ParentScope parent_scope(key_state);
+ VisiblityVisitor visitor(*this, parent_scope);
+ value_desc.callback(&visitor, value);
+ key_state.AddEagerEphemeronEdge(value, value_desc.callback);
+}
+
void CppGraphBuilderImpl::VisitEphemeronForVisibility(
const HeapObjectHeader& key, const HeapObjectHeader& value) {
auto& key_state = states_.GetOrCreateState(key);
@@ -820,6 +875,12 @@ void CppGraphBuilderImpl::Run() {
state.ForAllEphemeronEdges([this, &state](const HeapObjectHeader& value) {
AddEdge(state, value, "part of key -> value pair in ephemeron table");
});
+ object_visitor.set_edge_name(
+ "part of key -> value pair in ephemeron table");
+ state.ForAllEagerEphemeronEdges(
+ [&object_visitor](const void* value, cppgc::TraceCallback callback) {
+ callback(&object_visitor, value);
+ });
});
// Add roots.
{
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
index d98e2b54bf..388fa94aab 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
@@ -6,7 +6,6 @@
#define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_H_
#include "include/v8-cppgc.h"
-#include "include/v8.h"
#include "src/base/logging.h"
#include "src/heap/heap.h"
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
index e9da1163e4..09564055dc 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
@@ -4,7 +4,6 @@
#include "src/heap/cppgc-js/unified-heap-marking-visitor.h"
-#include "include/v8.h"
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-state.h"
diff --git a/deps/v8/src/heap/cppgc/DEPS b/deps/v8/src/heap/cppgc/DEPS
new file mode 100644
index 0000000000..37049928d5
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+include/cppgc",
+]
diff --git a/deps/v8/src/heap/cppgc/caged-heap-local-data.cc b/deps/v8/src/heap/cppgc/caged-heap-local-data.cc
index 55ededdc08..b1ce0df00f 100644
--- a/deps/v8/src/heap/cppgc/caged-heap-local-data.cc
+++ b/deps/v8/src/heap/cppgc/caged-heap-local-data.cc
@@ -13,6 +13,14 @@
namespace cppgc {
namespace internal {
+CagedHeapLocalData::CagedHeapLocalData(HeapBase& heap_base,
+ PageAllocator& allocator)
+ : heap_base(heap_base) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ age_table.Reset(&allocator);
+#endif // defined(CPPGC_YOUNG_GENERATION)
+}
+
#if defined(CPPGC_YOUNG_GENERATION)
static_assert(
@@ -30,7 +38,7 @@ void AgeTable::Reset(PageAllocator* allocator) {
allocator->DiscardSystemPages(reinterpret_cast<void*>(begin), end - begin);
}
-#endif
+#endif // defined(CPPGC_YOUNG_GENERATION)
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/caged-heap.cc b/deps/v8/src/heap/cppgc/caged-heap.cc
index c43ea6e3a5..2b5fed4af5 100644
--- a/deps/v8/src/heap/cppgc/caged-heap.cc
+++ b/deps/v8/src/heap/cppgc/caged-heap.cc
@@ -27,18 +27,17 @@ STATIC_ASSERT(api_constants::kCagedHeapReservationAlignment ==
namespace {
-VirtualMemory ReserveCagedHeap(PageAllocator* platform_allocator) {
- DCHECK_NOT_NULL(platform_allocator);
+VirtualMemory ReserveCagedHeap(PageAllocator& platform_allocator) {
DCHECK_EQ(0u,
- kCagedHeapReservationSize % platform_allocator->AllocatePageSize());
+ kCagedHeapReservationSize % platform_allocator.AllocatePageSize());
static constexpr size_t kAllocationTries = 4;
for (size_t i = 0; i < kAllocationTries; ++i) {
void* hint = reinterpret_cast<void*>(RoundDown(
- reinterpret_cast<uintptr_t>(platform_allocator->GetRandomMmapAddr()),
+ reinterpret_cast<uintptr_t>(platform_allocator.GetRandomMmapAddr()),
kCagedHeapReservationAlignment));
- VirtualMemory memory(platform_allocator, kCagedHeapReservationSize,
+ VirtualMemory memory(&platform_allocator, kCagedHeapReservationSize,
kCagedHeapReservationAlignment, hint);
if (memory.IsReserved()) return memory;
}
@@ -70,23 +69,19 @@ class CppgcBoundedPageAllocator final : public v8::base::BoundedPageAllocator {
} // namespace
-CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
+CagedHeap::CagedHeap(HeapBase& heap_base, PageAllocator& platform_allocator)
: reserved_area_(ReserveCagedHeap(platform_allocator)) {
using CagedAddress = CagedHeap::AllocatorType::Address;
- DCHECK_NOT_NULL(heap_base);
-
- CHECK(platform_allocator->SetPermissions(
+ const bool is_not_oom = platform_allocator.SetPermissions(
reserved_area_.address(),
- RoundUp(sizeof(CagedHeapLocalData), platform_allocator->CommitPageSize()),
- PageAllocator::kReadWrite));
+ RoundUp(sizeof(CagedHeapLocalData), platform_allocator.CommitPageSize()),
+ PageAllocator::kReadWrite);
+ // Failing to commit the reservation means that we are out of memory.
+ CHECK(is_not_oom);
- auto* local_data =
- new (reserved_area_.address()) CagedHeapLocalData(heap_base);
-#if defined(CPPGC_YOUNG_GENERATION)
- local_data->age_table.Reset(platform_allocator);
-#endif
- USE(local_data);
+ new (reserved_area_.address())
+ CagedHeapLocalData(heap_base, platform_allocator);
const CagedAddress caged_heap_start =
RoundUp(reinterpret_cast<CagedAddress>(reserved_area_.address()) +
@@ -97,7 +92,7 @@ CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
reinterpret_cast<CagedAddress>(reserved_area_.address());
bounded_allocator_ = std::make_unique<CppgcBoundedPageAllocator>(
- platform_allocator, caged_heap_start,
+ &platform_allocator, caged_heap_start,
reserved_area_.size() - local_data_size_with_padding, kPageSize);
}
diff --git a/deps/v8/src/heap/cppgc/caged-heap.h b/deps/v8/src/heap/cppgc/caged-heap.h
index 7ac34624a0..89b2f7f112 100644
--- a/deps/v8/src/heap/cppgc/caged-heap.h
+++ b/deps/v8/src/heap/cppgc/caged-heap.h
@@ -22,7 +22,17 @@ class CagedHeap final {
public:
using AllocatorType = v8::base::BoundedPageAllocator;
- CagedHeap(HeapBase* heap, PageAllocator* platform_allocator);
+ static uintptr_t OffsetFromAddress(const void* address) {
+ return reinterpret_cast<uintptr_t>(address) &
+ (kCagedHeapReservationAlignment - 1);
+ }
+
+ static uintptr_t BaseFromAddress(const void* address) {
+ return reinterpret_cast<uintptr_t>(address) &
+ ~(kCagedHeapReservationAlignment - 1);
+ }
+
+ CagedHeap(HeapBase& heap, PageAllocator& platform_allocator);
CagedHeap(const CagedHeap&) = delete;
CagedHeap& operator=(const CagedHeap&) = delete;
@@ -37,13 +47,13 @@ class CagedHeap final {
return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
}
- static uintptr_t OffsetFromAddress(void* address) {
- return reinterpret_cast<uintptr_t>(address) &
- (kCagedHeapReservationAlignment - 1);
+ bool IsOnHeap(const void* address) const {
+ return reinterpret_cast<void*>(BaseFromAddress(address)) ==
+ reserved_area_.address();
}
private:
- VirtualMemory reserved_area_;
+ const VirtualMemory reserved_area_;
std::unique_ptr<AllocatorType> bounded_allocator_;
};
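
OffsetFromAddress/BaseFromAddress are pure mask arithmetic on the cage alignment. A standalone sketch with an illustrative alignment value (the real kCagedHeapReservationAlignment is defined elsewhere in cppgc; the constant below is an assumption for the demo):

```cpp
#include <cstdint>
#include <cstdio>

// Assumed power-of-two alignment, chosen only for illustration.
constexpr uint64_t kAlignmentSketch = uint64_t{1} << 32;

constexpr uint64_t OffsetFromAddress(uint64_t address) {
  return address & (kAlignmentSketch - 1);
}

constexpr uint64_t BaseFromAddress(uint64_t address) {
  return address & ~(kAlignmentSketch - 1);
}

int main() {
  constexpr uint64_t addr = 0x00007f1234567890ull;
  // Base and offset partition the address: base + offset == addr.
  static_assert(BaseFromAddress(addr) + OffsetFromAddress(addr) == addr,
                "mask arithmetic splits an address into cage base and offset");
  std::printf("base=%#llx offset=%#llx\n",
              static_cast<unsigned long long>(BaseFromAddress(addr)),
              static_cast<unsigned long long>(OffsetFromAddress(addr)));
}
```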
diff --git a/deps/v8/src/heap/cppgc/gc-info.cc b/deps/v8/src/heap/cppgc/gc-info.cc
index de57805dcb..4c555106fd 100644
--- a/deps/v8/src/heap/cppgc/gc-info.cc
+++ b/deps/v8/src/heap/cppgc/gc-info.cc
@@ -3,19 +3,86 @@
// found in the LICENSE file.
#include "include/cppgc/internal/gc-info.h"
+
+#include "include/cppgc/internal/name-trait.h"
#include "include/v8config.h"
#include "src/heap/cppgc/gc-info-table.h"
namespace cppgc {
namespace internal {
-GCInfoIndex EnsureGCInfoIndex(std::atomic<GCInfoIndex>& registered_index,
- FinalizationCallback finalization_callback,
- TraceCallback trace_callback,
- NameCallback name_callback, bool has_v_table) {
+namespace {
+
+HeapObjectName GetHiddenName(const void*) {
+ return {NameProvider::kHiddenName, true};
+}
+
+} // namespace
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ FinalizationCallback finalization_callback, NameCallback name_callback) {
return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index,
- {finalization_callback, trace_callback, name_callback, has_v_table});
+ {finalization_callback, trace_callback, name_callback, true});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ FinalizationCallback finalization_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index,
+ {finalization_callback, trace_callback, GetHiddenName, true});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ NameCallback name_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index, {nullptr, trace_callback, name_callback, true});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index, {nullptr, trace_callback, GetHiddenName, true});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ FinalizationCallback finalization_callback, NameCallback name_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index,
+ {finalization_callback, trace_callback, name_callback, false});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ FinalizationCallback finalization_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index,
+ {finalization_callback, trace_callback, GetHiddenName, false});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ NameCallback name_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index, {nullptr, trace_callback, name_callback, false});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index, {nullptr, trace_callback, GetHiddenName, false});
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index c89c2842f9..db16019b61 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -17,6 +17,7 @@
#include "src/heap/cppgc/marking-verifier.h"
#include "src/heap/cppgc/object-view.h"
#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/platform.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
@@ -56,23 +57,26 @@ HeapBase::HeapBase(
StackSupport stack_support)
: raw_heap_(this, custom_spaces),
platform_(std::move(platform)),
+ oom_handler_(std::make_unique<FatalOutOfMemoryHandler>(this)),
#if defined(LEAK_SANITIZER)
lsan_page_allocator_(std::make_unique<v8::base::LsanPageAllocator>(
platform_->GetPageAllocator())),
#endif // LEAK_SANITIZER
#if defined(CPPGC_CAGED_HEAP)
- caged_heap_(this, page_allocator()),
- page_backend_(std::make_unique<PageBackend>(&caged_heap_.allocator())),
+ caged_heap_(*this, *page_allocator()),
+ page_backend_(std::make_unique<PageBackend>(caged_heap_.allocator(),
+ *oom_handler_.get())),
#else // !CPPGC_CAGED_HEAP
- page_backend_(std::make_unique<PageBackend>(page_allocator())),
+ page_backend_(std::make_unique<PageBackend>(*page_allocator(),
+ *oom_handler_.get())),
#endif // !CPPGC_CAGED_HEAP
stats_collector_(std::make_unique<StatsCollector>(platform_.get())),
stack_(std::make_unique<heap::base::Stack>(
v8::base::Stack::GetStackStart())),
prefinalizer_handler_(std::make_unique<PreFinalizerHandler>(*this)),
compactor_(raw_heap_),
- object_allocator_(&raw_heap_, page_backend_.get(),
- stats_collector_.get()),
+ object_allocator_(raw_heap_, *page_backend_, *stats_collector_,
+ *prefinalizer_handler_),
sweeper_(*this),
stack_support_(stack_support) {
stats_collector_->RegisterObserver(
@@ -96,10 +100,17 @@ size_t HeapBase::ObjectPayloadSize() const {
void HeapBase::AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded() {
if (marker_) marker_->AdvanceMarkingOnAllocation();
}
-void HeapBase::ExecutePreFinalizers() {
+
+size_t HeapBase::ExecutePreFinalizers() {
+#ifdef CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
+ // Allocations in pre finalizers should not trigger another GC.
+ cppgc::subtle::NoGarbageCollectionScope no_gc_scope(*this);
+#else
// Pre finalizers are forbidden from allocating objects.
cppgc::subtle::DisallowGarbageCollectionScope no_gc_scope(*this);
+#endif // CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
prefinalizer_handler_->InvokePreFinalizers();
+ return prefinalizer_handler_->ExtractBytesAllocatedInPrefinalizers();
}
void HeapBase::Terminate() {
@@ -110,6 +121,7 @@ void HeapBase::Terminate() {
constexpr size_t kMaxTerminationGCs = 20;
size_t gc_count = 0;
+ bool more_termination_gcs_needed = false;
do {
CHECK_LT(gc_count++, kMaxTerminationGCs);
@@ -132,7 +144,14 @@ void HeapBase::Terminate() {
{Sweeper::SweepingConfig::SweepingType::kAtomic,
Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep});
sweeper().NotifyDoneIfNeeded();
- } while (strong_persistent_region_.NodesInUse() > 0);
+ more_termination_gcs_needed =
+ strong_persistent_region_.NodesInUse() ||
+ weak_persistent_region_.NodesInUse() || [this]() {
+ PersistentRegionLock guard;
+ return strong_cross_thread_persistent_region_.NodesInUse() ||
+ weak_cross_thread_persistent_region_.NodesInUse();
+ }();
+ } while (more_termination_gcs_needed);
object_allocator().Terminate();
disallow_gc_scope_++;
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index 91f99b39cc..6196955a3e 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -18,6 +18,7 @@
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/metric-recorder.h"
#include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/platform.h"
#include "src/heap/cppgc/process-heap-statistics.h"
#include "src/heap/cppgc/process-heap.h"
#include "src/heap/cppgc/raw-heap.h"
@@ -65,6 +66,7 @@ namespace testing {
class TestWithHeap;
} // namespace testing
+class FatalOutOfMemoryHandler;
class PageBackend;
class PreFinalizerHandler;
class StatsCollector;
@@ -95,6 +97,11 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
cppgc::Platform* platform() { return platform_.get(); }
const cppgc::Platform* platform() const { return platform_.get(); }
+ FatalOutOfMemoryHandler& oom_handler() { return *oom_handler_.get(); }
+ const FatalOutOfMemoryHandler& oom_handler() const {
+ return *oom_handler_.get();
+ }
+
PageBackend* page_backend() { return page_backend_.get(); }
const PageBackend* page_backend() const { return page_backend_.get(); }
@@ -208,12 +215,14 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
bool IsMarking() const { return marker_.get(); }
- void ExecutePreFinalizers();
+ // Returns the number of bytes allocated while executing prefinalizers.
+ size_t ExecutePreFinalizers();
PageAllocator* page_allocator() const;
RawHeap raw_heap_;
std::shared_ptr<cppgc::Platform> platform_;
+ std::unique_ptr<FatalOutOfMemoryHandler> oom_handler_;
#if defined(LEAK_SANITIZER)
std::unique_ptr<v8::base::LsanPageAllocator> lsan_page_allocator_;
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.h b/deps/v8/src/heap/cppgc/heap-object-header.h
index a50d115e52..97a65fbf20 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header.h
@@ -91,6 +91,8 @@ class HeapObjectHeader {
void Unmark();
inline bool TryMarkAtomic();
+ inline void MarkNonAtomic();
+
template <AccessMode = AccessMode::kNonAtomic>
bool IsYoung() const;
@@ -266,6 +268,11 @@ bool HeapObjectHeader::TryMarkAtomic() {
std::memory_order_relaxed);
}
+void HeapObjectHeader::MarkNonAtomic() {
+ DCHECK(!IsMarked<AccessMode::kNonAtomic>());
+ encoded_low_ |= MarkBitField::encode(true);
+}
+
template <AccessMode mode>
bool HeapObjectHeader::IsYoung() const {
return !IsMarked<mode>();
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index 58252a20ab..a4e514a7c2 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -187,13 +187,17 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
marker_->FinishMarking(config_.stack_state);
}
marker_.reset();
- ExecutePreFinalizers();
- // TODO(chromium:1056170): replace build flag with dedicated flag.
-#if DEBUG
+ const size_t bytes_allocated_in_prefinalizers = ExecutePreFinalizers();
+#if CPPGC_VERIFY_HEAP
MarkingVerifier verifier(*this);
- verifier.Run(config_.stack_state, stack_end_of_current_gc(),
- stats_collector()->marked_bytes());
+ verifier.Run(
+ config_.stack_state, stack_end_of_current_gc(),
+ stats_collector()->marked_bytes() + bytes_allocated_in_prefinalizers);
+#endif // CPPGC_VERIFY_HEAP
+#ifndef CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
+ DCHECK_EQ(0u, bytes_allocated_in_prefinalizers);
#endif
+ USE(bytes_allocated_in_prefinalizers);
subtle::NoGarbageCollectionScope no_gc(*this);
const Sweeper::SweepingConfig sweeping_config{
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index 549a9fe1da..e290787a59 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -38,7 +38,7 @@ bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Enter();
#if defined(CPPGC_CAGED_HEAP)
heap.caged_heap().local_data().is_incremental_marking_in_progress = true;
-#endif
+#endif // defined(CPPGC_CAGED_HEAP)
return true;
}
return false;
@@ -52,7 +52,7 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Exit();
#if defined(CPPGC_CAGED_HEAP)
heap.caged_heap().local_data().is_incremental_marking_in_progress = false;
-#endif
+#endif // defined(CPPGC_CAGED_HEAP)
return true;
}
return false;
@@ -421,7 +421,9 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
StatsCollector::EnabledScope stats_scope(
heap().stats_collector(), StatsCollector::kMarkTransitiveClosure);
+ bool saved_did_discover_new_ephemeron_pairs;
do {
+ mutator_marking_state_.ResetDidDiscoverNewEphemeronPairs();
if ((config_.marking_type == MarkingConfig::MarkingType::kAtomic) ||
schedule_.ShouldFlushEphemeronPairs()) {
mutator_marking_state_.FlushDiscoveredEphemeronPairs();
@@ -509,6 +511,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
}
}
+ saved_did_discover_new_ephemeron_pairs =
+ mutator_marking_state_.DidDiscoverNewEphemeronPairs();
{
StatsCollector::EnabledScope stats_scope(
heap().stats_collector(), StatsCollector::kMarkProcessEphemerons);
@@ -522,7 +526,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
return false;
}
}
- } while (!mutator_marking_state_.marking_worklist().IsLocalAndGlobalEmpty());
+ } while (!mutator_marking_state_.marking_worklist().IsLocalAndGlobalEmpty() ||
+ saved_did_discover_new_ephemeron_pairs);
return true;
}
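
The saved_did_discover_new_ephemeron_pairs flag above guards a fixed-point iteration: marking an ephemeron's value may in turn satisfy the key of another pending ephemeron, so a pass that discovers new pairs forces another round. A simplified, self-contained sketch of that fixed point, where integer ids stand in for heap objects; this is a model of the idea, not the marker's actual data structures:

#include <set>
#include <utility>
#include <vector>

struct Ephemeron { int key; int value; };  // Hypothetical object ids.

void ProcessEphemerons(std::set<int>& marked, std::vector<Ephemeron> pending) {
  bool discovered_new;
  do {
    discovered_new = false;
    std::vector<Ephemeron> still_pending;
    for (const Ephemeron& e : pending) {
      if (marked.count(e.key)) {
        // Key is live, so the value becomes live; marking it may make it the
        // key of another pending ephemeron, which requires another pass.
        if (marked.insert(e.value).second) discovered_new = true;
      } else {
        still_pending.push_back(e);  // Key not marked (yet); keep the pair.
      }
    }
    pending = std::move(still_pending);
  } while (discovered_new);  // Repeat until a pass makes no progress.
}
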
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index 17e64e6fbe..864c8209b7 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -115,6 +115,14 @@ class MarkingStateBase {
movable_slots_worklist_.reset();
}
+ bool DidDiscoverNewEphemeronPairs() const {
+ return discovered_new_ephemeron_pairs_;
+ }
+
+ void ResetDidDiscoverNewEphemeronPairs() {
+ discovered_new_ephemeron_pairs_ = false;
+ }
+
protected:
inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
@@ -150,6 +158,8 @@ class MarkingStateBase {
movable_slots_worklist_;
size_t marked_bytes_ = 0;
+ bool in_ephemeron_processing_ = false;
+ bool discovered_new_ephemeron_pairs_ = false;
};
MarkingStateBase::MarkingStateBase(HeapBase& heap,
@@ -286,10 +296,16 @@ void MarkingStateBase::ProcessWeakContainer(const void* object,
void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
TraceDescriptor value_desc,
Visitor& visitor) {
+ // ProcessEphemeron is not expected to find new ephemerons recursively, which
+ // would break the main marking loop.
+ DCHECK(!in_ephemeron_processing_);
+ in_ephemeron_processing_ = true;
// Filter out already marked keys. The write barrier for WeakMember
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
- if (HeapObjectHeader::FromObject(key).IsMarked<AccessMode::kAtomic>()) {
+ if (!HeapObjectHeader::FromObject(key)
+ .IsInConstruction<AccessMode::kAtomic>() &&
+ HeapObjectHeader::FromObject(key).IsMarked<AccessMode::kAtomic>()) {
if (value_desc.base_object_payload) {
MarkAndPush(value_desc.base_object_payload, value_desc);
} else {
@@ -297,9 +313,11 @@ void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
// should be immediately traced.
value_desc.callback(&visitor, value);
}
- return;
+ } else {
+ discovered_ephemeron_pairs_worklist_.Push({key, value, value_desc});
+ discovered_new_ephemeron_pairs_ = true;
}
- discovered_ephemeron_pairs_worklist_.Push({key, value, value_desc});
+ in_ephemeron_processing_ = false;
}
void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index 4d2ebcff1d..0dbda1159c 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -21,9 +21,9 @@ MarkingVerifierBase::MarkingVerifierBase(
verification_state_(verification_state),
visitor_(std::move(visitor)) {}
-void MarkingVerifierBase::Run(Heap::Config::StackState stack_state,
- uintptr_t stack_end,
- size_t expected_marked_bytes) {
+void MarkingVerifierBase::Run(
+ Heap::Config::StackState stack_state, uintptr_t stack_end,
+ v8::base::Optional<size_t> expected_marked_bytes) {
Traverse(heap_.raw_heap());
if (stack_state == Heap::Config::StackState::kMayContainHeapPointers) {
in_construction_objects_ = &in_construction_objects_stack_;
@@ -38,9 +38,9 @@ void MarkingVerifierBase::Run(Heap::Config::StackState stack_state,
in_construction_objects_heap_.find(header));
}
}
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- CHECK_EQ(expected_marked_bytes, found_marked_bytes_);
-#endif // CPPGC_VERIFY_LIVE_BYTES
+ if (expected_marked_bytes) {
+ CHECK_EQ(expected_marked_bytes.value(), found_marked_bytes_);
+ }
}
void VerificationState::VerifyMarked(const void* base_object_payload) const {
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.h b/deps/v8/src/heap/cppgc/marking-verifier.h
index 72d49daa76..ca588f40d8 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.h
+++ b/deps/v8/src/heap/cppgc/marking-verifier.h
@@ -7,6 +7,7 @@
#include <unordered_set>
+#include "src/base/optional.h"
#include "src/heap/base/stack.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-visitor.h"
@@ -40,7 +41,7 @@ class V8_EXPORT_PRIVATE MarkingVerifierBase
MarkingVerifierBase(const MarkingVerifierBase&) = delete;
MarkingVerifierBase& operator=(const MarkingVerifierBase&) = delete;
- void Run(Heap::Config::StackState, uintptr_t, size_t);
+ void Run(Heap::Config::StackState, uintptr_t, v8::base::Optional<size_t>);
protected:
MarkingVerifierBase(HeapBase&, VerificationState&,
diff --git a/deps/v8/src/heap/cppgc/memory.cc b/deps/v8/src/heap/cppgc/memory.cc
index aa3baeaa8a..6d81957325 100644
--- a/deps/v8/src/heap/cppgc/memory.cc
+++ b/deps/v8/src/heap/cppgc/memory.cc
@@ -12,7 +12,7 @@ namespace cppgc {
namespace internal {
void NoSanitizeMemset(void* address, char c, size_t bytes) {
- volatile Address base = reinterpret_cast<Address>(address);
+ volatile uint8_t* const base = static_cast<uint8_t*>(address);
for (size_t i = 0; i < bytes; ++i) {
base[i] = c;
}
diff --git a/deps/v8/src/heap/cppgc/memory.h b/deps/v8/src/heap/cppgc/memory.h
index adc2ce9bb3..3b9f6cb62c 100644
--- a/deps/v8/src/heap/cppgc/memory.h
+++ b/deps/v8/src/heap/cppgc/memory.h
@@ -117,7 +117,11 @@ V8_INLINE void CheckMemoryIsInaccessible(const void* address, size_t size) {
static_assert(!CheckMemoryIsInaccessibleIsNoop(),
"CheckMemoryIsInaccessibleIsNoop() needs to reflect "
"CheckMemoryIsInaccessible().");
- ASAN_CHECK_MEMORY_REGION_IS_POISONED(address, size);
+ // Only check that memory is poisoned on 64-bit, since there we make sure
+ // that object sizes and alignments are a multiple of the shadow memory
+ // granularity.
+#if defined(V8_TARGET_ARCH_64_BIT)
+ ASAN_CHECK_WHOLE_MEMORY_REGION_IS_POISONED(address, size);
+#endif
ASAN_UNPOISON_MEMORY_REGION(address, size);
CheckMemoryIsZero(address, size);
ASAN_POISON_MEMORY_REGION(address, size);
diff --git a/deps/v8/src/heap/cppgc/object-allocator.cc b/deps/v8/src/heap/cppgc/object-allocator.cc
index 191e73e6d8..0f85d43c1c 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.cc
+++ b/deps/v8/src/heap/cppgc/object-allocator.cc
@@ -16,6 +16,7 @@
#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"
@@ -39,7 +40,7 @@ void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
? RoundUp(offset_end, kEntrySize)
: RoundDown(offset_end, kEntrySize);
- auto& age_table = page->heap()->caged_heap().local_data().age_table;
+ auto& age_table = page->heap().caged_heap().local_data().age_table;
for (auto offset = young_offset_begin; offset < young_offset_end;
offset += AgeTable::kEntrySizeInBytes) {
age_table[offset] = AgeTable::Age::kYoung;
@@ -82,16 +83,16 @@ void ReplaceLinearAllocationBuffer(NormalPageSpace& space,
}
}
-void* AllocateLargeObject(PageBackend* page_backend, LargePageSpace* space,
- StatsCollector* stats_collector, size_t size,
+void* AllocateLargeObject(PageBackend& page_backend, LargePageSpace& space,
+ StatsCollector& stats_collector, size_t size,
GCInfoIndex gcinfo) {
- LargePage* page = LargePage::Create(*page_backend, *space, size);
- space->AddPage(page);
+ LargePage* page = LargePage::Create(page_backend, space, size);
+ space.AddPage(page);
auto* header = new (page->ObjectHeader())
HeapObjectHeader(HeapObjectHeader::kLargeObjectSizeInHeader, gcinfo);
- stats_collector->NotifyAllocation(size);
+ stats_collector.NotifyAllocation(size);
MarkRangeAsYoung(page, page->PayloadStart(), page->PayloadEnd());
return header->ObjectStart();
@@ -101,17 +102,29 @@ void* AllocateLargeObject(PageBackend* page_backend, LargePageSpace* space,
constexpr size_t ObjectAllocator::kSmallestSpaceSize;
-ObjectAllocator::ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
- StatsCollector* stats_collector)
+ObjectAllocator::ObjectAllocator(RawHeap& heap, PageBackend& page_backend,
+ StatsCollector& stats_collector,
+ PreFinalizerHandler& prefinalizer_handler)
: raw_heap_(heap),
page_backend_(page_backend),
- stats_collector_(stats_collector) {}
+ stats_collector_(stats_collector),
+ prefinalizer_handler_(prefinalizer_handler) {}
void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size,
GCInfoIndex gcinfo) {
void* memory = OutOfLineAllocateImpl(space, size, gcinfo);
- stats_collector_->NotifySafePointForConservativeCollection();
- raw_heap_->heap()->AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
+ stats_collector_.NotifySafePointForConservativeCollection();
+ raw_heap_.heap()->AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
+ if (prefinalizer_handler_.IsInvokingPreFinalizers()) {
+ // Objects allocated during pre finalizers should be allocated as black
+ // since marking is already done. Atomics are not needed because there is
+ // no concurrent marking in the background.
+ HeapObjectHeader::FromObject(memory).MarkNonAtomic();
+ // Resetting the allocation buffer forces all further allocations in pre
+ // finalizers to go through this slow path.
+ ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
+ prefinalizer_handler_.NotifyAllocationInPrefinalizer(size);
+ }
return memory;
}
@@ -124,8 +137,8 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
// 1. If this allocation is big enough, allocate a large object.
if (size >= kLargeObjectSizeThreshold) {
- auto* large_space = &LargePageSpace::From(
- *raw_heap_->Space(RawHeap::RegularSpaceType::kLarge));
+ auto& large_space = LargePageSpace::From(
+ *raw_heap_.Space(RawHeap::RegularSpaceType::kLarge));
return AllocateLargeObject(page_backend_, large_space, stats_collector_,
size, gcinfo);
}
@@ -137,7 +150,7 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
// 3. Lazily sweep pages of this heap until we find a freed area for
// this allocation or we finish sweeping all pages of this heap.
- Sweeper& sweeper = raw_heap_->heap()->sweeper();
+ Sweeper& sweeper = raw_heap_.heap()->sweeper();
// TODO(chromium:1056170): Investigate whether this should be a loop which
// would result in more aggressive re-use of memory at the expense of
// potentially larger allocation time.
@@ -159,11 +172,11 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
// TODO(chromium:1056170): Make use of the synchronously freed memory.
// 5. Add a new page to this heap.
- auto* new_page = NormalPage::Create(*page_backend_, space);
+ auto* new_page = NormalPage::Create(page_backend_, space);
space.AddPage(new_page);
// 6. Set linear allocation buffer to new page.
- ReplaceLinearAllocationBuffer(space, *stats_collector_,
+ ReplaceLinearAllocationBuffer(space, stats_collector_,
new_page->PayloadStart(),
new_page->PayloadSize());
@@ -182,13 +195,12 @@ void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace& space, size_t size,
// Assume discarded memory on that page is now zero.
auto& page = *NormalPage::From(BasePage::FromPayload(entry.address));
if (page.discarded_memory()) {
- stats_collector_->DecrementDiscardedMemory(page.discarded_memory());
+ stats_collector_.DecrementDiscardedMemory(page.discarded_memory());
page.ResetDiscardedMemory();
}
- ReplaceLinearAllocationBuffer(space, *stats_collector_,
- static_cast<Address>(entry.address),
- entry.size);
+ ReplaceLinearAllocationBuffer(
+ space, stats_collector_, static_cast<Address>(entry.address), entry.size);
return AllocateObjectOnSpace(space, size, gcinfo);
}
@@ -196,20 +208,20 @@ void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace& space, size_t size,
void ObjectAllocator::ResetLinearAllocationBuffers() {
class Resetter : public HeapVisitor<Resetter> {
public:
- explicit Resetter(StatsCollector* stats) : stats_collector_(stats) {}
+ explicit Resetter(StatsCollector& stats) : stats_collector_(stats) {}
bool VisitLargePageSpace(LargePageSpace&) { return true; }
bool VisitNormalPageSpace(NormalPageSpace& space) {
- ReplaceLinearAllocationBuffer(space, *stats_collector_, nullptr, 0);
+ ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
return true;
}
private:
- StatsCollector* stats_collector_;
+ StatsCollector& stats_collector_;
} visitor(stats_collector_);
- visitor.Traverse(*raw_heap_);
+ visitor.Traverse(raw_heap_);
}
void ObjectAllocator::Terminate() {
@@ -217,7 +229,7 @@ void ObjectAllocator::Terminate() {
}
bool ObjectAllocator::in_disallow_gc_scope() const {
- return raw_heap_->heap()->in_disallow_gc_scope();
+ return raw_heap_.heap()->in_disallow_gc_scope();
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/object-allocator.h b/deps/v8/src/heap/cppgc/object-allocator.h
index dd0035cfe9..c02115b667 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.h
+++ b/deps/v8/src/heap/cppgc/object-allocator.h
@@ -20,6 +20,7 @@ namespace cppgc {
namespace internal {
class ObjectAllocator;
+class PreFinalizerHandler;
} // namespace internal
class V8_EXPORT AllocationHandle {
@@ -37,8 +38,9 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
public:
static constexpr size_t kSmallestSpaceSize = 32;
- ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
- StatsCollector* stats_collector);
+ ObjectAllocator(RawHeap& heap, PageBackend& page_backend,
+ StatsCollector& stats_collector,
+ PreFinalizerHandler& prefinalizer_handler);
inline void* AllocateObject(size_t size, GCInfoIndex gcinfo);
inline void* AllocateObject(size_t size, GCInfoIndex gcinfo,
@@ -63,9 +65,10 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
void* OutOfLineAllocateImpl(NormalPageSpace&, size_t, GCInfoIndex);
void* AllocateFromFreeList(NormalPageSpace&, size_t, GCInfoIndex);
- RawHeap* raw_heap_;
- PageBackend* page_backend_;
- StatsCollector* stats_collector_;
+ RawHeap& raw_heap_;
+ PageBackend& page_backend_;
+ StatsCollector& stats_collector_;
+ PreFinalizerHandler& prefinalizer_handler_;
};
void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
@@ -74,7 +77,7 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
const RawHeap::RegularSpaceType type =
GetInitialSpaceIndexForSize(allocation_size);
- return AllocateObjectOnSpace(NormalPageSpace::From(*raw_heap_->Space(type)),
+ return AllocateObjectOnSpace(NormalPageSpace::From(*raw_heap_.Space(type)),
allocation_size, gcinfo);
}
@@ -84,7 +87,7 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo,
const size_t allocation_size =
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
return AllocateObjectOnSpace(
- NormalPageSpace::From(*raw_heap_->CustomSpace(space_index)),
+ NormalPageSpace::From(*raw_heap_.CustomSpace(space_index)),
allocation_size, gcinfo);
}
diff --git a/deps/v8/src/heap/cppgc/page-memory.cc b/deps/v8/src/heap/cppgc/page-memory.cc
index 49b44aff91..ed76f903e8 100644
--- a/deps/v8/src/heap/cppgc/page-memory.cc
+++ b/deps/v8/src/heap/cppgc/page-memory.cc
@@ -6,17 +6,21 @@
#include "src/base/macros.h"
#include "src/base/sanitizer/asan.h"
+#include "src/heap/cppgc/platform.h"
namespace cppgc {
namespace internal {
namespace {
-void Unprotect(PageAllocator* allocator, const PageMemory& page_memory) {
+void Unprotect(PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
+ const PageMemory& page_memory) {
if (SupportsCommittingGuardPages(allocator)) {
- CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
- page_memory.writeable_region().size(),
- PageAllocator::Permission::kReadWrite));
+ if (!allocator.SetPermissions(page_memory.writeable_region().base(),
+ page_memory.writeable_region().size(),
+ PageAllocator::Permission::kReadWrite)) {
+ oom_handler("Oilpan: Unprotecting memory.");
+ }
} else {
// No protection in case the allocator cannot commit at the required
// granularity. Only protect if the allocator supports committing at that
@@ -24,53 +28,66 @@ void Unprotect(PageAllocator* allocator, const PageMemory& page_memory) {
//
// The allocator needs to support committing the overall range.
CHECK_EQ(0u,
- page_memory.overall_region().size() % allocator->CommitPageSize());
- CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
- page_memory.overall_region().size(),
- PageAllocator::Permission::kReadWrite));
+ page_memory.overall_region().size() % allocator.CommitPageSize());
+ if (!allocator.SetPermissions(page_memory.overall_region().base(),
+ page_memory.overall_region().size(),
+ PageAllocator::Permission::kReadWrite)) {
+ oom_handler("Oilpan: Unprotecting memory.");
+ }
}
}
-void Protect(PageAllocator* allocator, const PageMemory& page_memory) {
+void Protect(PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
+ const PageMemory& page_memory) {
if (SupportsCommittingGuardPages(allocator)) {
// Swap the same region, providing the OS with a chance for fast lookup and
// change.
- CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
- page_memory.writeable_region().size(),
- PageAllocator::Permission::kNoAccess));
+ if (!allocator.SetPermissions(page_memory.writeable_region().base(),
+ page_memory.writeable_region().size(),
+ PageAllocator::Permission::kNoAccess)) {
+ oom_handler("Oilpan: Protecting memory.");
+ }
} else {
// See Unprotect().
CHECK_EQ(0u,
- page_memory.overall_region().size() % allocator->CommitPageSize());
- CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
- page_memory.overall_region().size(),
- PageAllocator::Permission::kNoAccess));
+ page_memory.overall_region().size() % allocator.CommitPageSize());
+ if (!allocator.SetPermissions(page_memory.overall_region().base(),
+ page_memory.overall_region().size(),
+ PageAllocator::Permission::kNoAccess)) {
+ oom_handler("Oilpan: Protecting memory.");
+ }
}
}
-MemoryRegion ReserveMemoryRegion(PageAllocator* allocator,
+MemoryRegion ReserveMemoryRegion(PageAllocator& allocator,
+ FatalOutOfMemoryHandler& oom_handler,
size_t allocation_size) {
void* region_memory =
- allocator->AllocatePages(nullptr, allocation_size, kPageSize,
- PageAllocator::Permission::kNoAccess);
+ allocator.AllocatePages(nullptr, allocation_size, kPageSize,
+ PageAllocator::Permission::kNoAccess);
+ if (!region_memory) {
+ oom_handler("Oilpan: Reserving memory.");
+ }
const MemoryRegion reserved_region(static_cast<Address>(region_memory),
allocation_size);
DCHECK_EQ(reserved_region.base() + allocation_size, reserved_region.end());
return reserved_region;
}
-void FreeMemoryRegion(PageAllocator* allocator,
+void FreeMemoryRegion(PageAllocator& allocator,
const MemoryRegion& reserved_region) {
// Make sure pages returned to OS are unpoisoned.
ASAN_UNPOISON_MEMORY_REGION(reserved_region.base(), reserved_region.size());
- allocator->FreePages(reserved_region.base(), reserved_region.size());
+ allocator.FreePages(reserved_region.base(), reserved_region.size());
}
} // namespace
-PageMemoryRegion::PageMemoryRegion(PageAllocator* allocator,
+PageMemoryRegion::PageMemoryRegion(PageAllocator& allocator,
+ FatalOutOfMemoryHandler& oom_handler,
MemoryRegion reserved_region, bool is_large)
: allocator_(allocator),
+ oom_handler_(oom_handler),
reserved_region_(reserved_region),
is_large_(is_large) {}
@@ -81,12 +98,14 @@ PageMemoryRegion::~PageMemoryRegion() {
// static
constexpr size_t NormalPageMemoryRegion::kNumPageRegions;
-NormalPageMemoryRegion::NormalPageMemoryRegion(PageAllocator* allocator)
- : PageMemoryRegion(allocator,
- ReserveMemoryRegion(
- allocator, RoundUp(kPageSize * kNumPageRegions,
- allocator->AllocatePageSize())),
- false) {
+NormalPageMemoryRegion::NormalPageMemoryRegion(
+ PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler)
+ : PageMemoryRegion(
+ allocator, oom_handler,
+ ReserveMemoryRegion(allocator, oom_handler,
+ RoundUp(kPageSize * kNumPageRegions,
+ allocator.AllocatePageSize())),
+ false) {
#ifdef DEBUG
for (size_t i = 0; i < kNumPageRegions; ++i) {
DCHECK_EQ(false, page_memories_in_use_[i]);
@@ -99,33 +118,35 @@ NormalPageMemoryRegion::~NormalPageMemoryRegion() = default;
void NormalPageMemoryRegion::Allocate(Address writeable_base) {
const size_t index = GetIndex(writeable_base);
ChangeUsed(index, true);
- Unprotect(allocator_, GetPageMemory(index));
+ Unprotect(allocator_, oom_handler_, GetPageMemory(index));
}
void NormalPageMemoryRegion::Free(Address writeable_base) {
const size_t index = GetIndex(writeable_base);
ChangeUsed(index, false);
- Protect(allocator_, GetPageMemory(index));
+ Protect(allocator_, oom_handler_, GetPageMemory(index));
}
void NormalPageMemoryRegion::UnprotectForTesting() {
for (size_t i = 0; i < kNumPageRegions; ++i) {
- Unprotect(allocator_, GetPageMemory(i));
+ Unprotect(allocator_, oom_handler_, GetPageMemory(i));
}
}
-LargePageMemoryRegion::LargePageMemoryRegion(PageAllocator* allocator,
- size_t length)
- : PageMemoryRegion(allocator,
- ReserveMemoryRegion(
- allocator, RoundUp(length + 2 * kGuardPageSize,
- allocator->AllocatePageSize())),
- true) {}
+LargePageMemoryRegion::LargePageMemoryRegion(
+ PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
+ size_t length)
+ : PageMemoryRegion(
+ allocator, oom_handler,
+ ReserveMemoryRegion(allocator, oom_handler,
+ RoundUp(length + 2 * kGuardPageSize,
+ allocator.AllocatePageSize())),
+ true) {}
LargePageMemoryRegion::~LargePageMemoryRegion() = default;
void LargePageMemoryRegion::UnprotectForTesting() {
- Unprotect(allocator_, GetPageMemory());
+ Unprotect(allocator_, oom_handler_, GetPageMemory());
}
PageMemoryRegionTree::PageMemoryRegionTree() = default;
@@ -165,27 +186,33 @@ std::pair<NormalPageMemoryRegion*, Address> NormalPageMemoryPool::Take(
return pair;
}
-PageBackend::PageBackend(PageAllocator* allocator) : allocator_(allocator) {}
+PageBackend::PageBackend(PageAllocator& allocator,
+ FatalOutOfMemoryHandler& oom_handler)
+ : allocator_(allocator), oom_handler_(oom_handler) {}
PageBackend::~PageBackend() = default;
Address PageBackend::AllocateNormalPageMemory(size_t bucket) {
+ v8::base::MutexGuard guard(&mutex_);
std::pair<NormalPageMemoryRegion*, Address> result = page_pool_.Take(bucket);
if (!result.first) {
- auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator_);
+ auto pmr =
+ std::make_unique<NormalPageMemoryRegion>(allocator_, oom_handler_);
for (size_t i = 0; i < NormalPageMemoryRegion::kNumPageRegions; ++i) {
page_pool_.Add(bucket, pmr.get(),
pmr->GetPageMemory(i).writeable_region().base());
}
page_memory_region_tree_.Add(pmr.get());
normal_page_memory_regions_.push_back(std::move(pmr));
- return AllocateNormalPageMemory(bucket);
+ result = page_pool_.Take(bucket);
+ DCHECK(result.first);
}
result.first->Allocate(result.second);
return result.second;
}
void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {
+ v8::base::MutexGuard guard(&mutex_);
auto* pmr = static_cast<NormalPageMemoryRegion*>(
page_memory_region_tree_.Lookup(writeable_base));
pmr->Free(writeable_base);
@@ -193,15 +220,18 @@ void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {
}
Address PageBackend::AllocateLargePageMemory(size_t size) {
- auto pmr = std::make_unique<LargePageMemoryRegion>(allocator_, size);
+ v8::base::MutexGuard guard(&mutex_);
+ auto pmr =
+ std::make_unique<LargePageMemoryRegion>(allocator_, oom_handler_, size);
const PageMemory pm = pmr->GetPageMemory();
- Unprotect(allocator_, pm);
+ Unprotect(allocator_, oom_handler_, pm);
page_memory_region_tree_.Add(pmr.get());
large_page_memory_regions_.insert(std::make_pair(pmr.get(), std::move(pmr)));
return pm.writeable_region().base();
}
void PageBackend::FreeLargePageMemory(Address writeable_base) {
+ v8::base::MutexGuard guard(&mutex_);
PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(writeable_base);
page_memory_region_tree_.Remove(pmr);
auto size = large_page_memory_regions_.erase(pmr);
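
With mutex_ now held across AllocateNormalPageMemory(), the previous recursive retry would re-acquire a non-recursive v8::base::Mutex and self-deadlock, which is presumably why the hunk above refills the pool and re-takes from it in place. A generic sketch of that refill-then-retake shape, with a hypothetical Pool class and the standard library mutex standing in for v8::base::Mutex:

#include <mutex>
#include <vector>

class Pool {
 public:
  int Take() {
    std::lock_guard<std::mutex> guard(mutex_);  // Non-recursive lock.
    if (free_.empty()) Refill();  // Refill under the same lock...
    int result = free_.back();    // ...then retake directly instead of
    free_.pop_back();             // recursing, which would self-deadlock.
    return result;
  }

 private:
  void Refill() {
    for (int i = 0; i < 8; ++i) free_.push_back(next_++);
  }

  std::mutex mutex_;
  std::vector<int> free_;
  int next_ = 0;
};
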
diff --git a/deps/v8/src/heap/cppgc/page-memory.h b/deps/v8/src/heap/cppgc/page-memory.h
index 51b2b61f7d..e5b73318f7 100644
--- a/deps/v8/src/heap/cppgc/page-memory.h
+++ b/deps/v8/src/heap/cppgc/page-memory.h
@@ -13,11 +13,14 @@
#include "include/cppgc/platform.h"
#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/globals.h"
namespace cppgc {
namespace internal {
+class FatalOutOfMemoryHandler;
+
class V8_EXPORT_PRIVATE MemoryRegion final {
public:
MemoryRegion() = default;
@@ -79,9 +82,11 @@ class V8_EXPORT_PRIVATE PageMemoryRegion {
virtual void UnprotectForTesting() = 0;
protected:
- PageMemoryRegion(PageAllocator*, MemoryRegion, bool);
+ PageMemoryRegion(PageAllocator&, FatalOutOfMemoryHandler&, MemoryRegion,
+ bool);
- PageAllocator* const allocator_;
+ PageAllocator& allocator_;
+ FatalOutOfMemoryHandler& oom_handler_;
const MemoryRegion reserved_region_;
const bool is_large_;
};
@@ -91,7 +96,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
public:
static constexpr size_t kNumPageRegions = 10;
- explicit NormalPageMemoryRegion(PageAllocator*);
+ NormalPageMemoryRegion(PageAllocator&, FatalOutOfMemoryHandler&);
~NormalPageMemoryRegion() override;
const PageMemory GetPageMemory(size_t index) const {
@@ -133,7 +138,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
// LargePageMemoryRegion serves a single large PageMemory object.
class V8_EXPORT_PRIVATE LargePageMemoryRegion final : public PageMemoryRegion {
public:
- LargePageMemoryRegion(PageAllocator*, size_t);
+ LargePageMemoryRegion(PageAllocator&, FatalOutOfMemoryHandler&, size_t);
~LargePageMemoryRegion() override;
const PageMemory GetPageMemory() const {
@@ -193,7 +198,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryPool final {
// regions alive.
class V8_EXPORT_PRIVATE PageBackend final {
public:
- explicit PageBackend(PageAllocator*);
+ PageBackend(PageAllocator&, FatalOutOfMemoryHandler&);
~PageBackend();
// Allocates a normal page from the backend.
@@ -223,7 +228,10 @@ class V8_EXPORT_PRIVATE PageBackend final {
PageBackend& operator=(const PageBackend&) = delete;
private:
- PageAllocator* allocator_;
+ // Guards against concurrent uses of `Lookup()`.
+ mutable v8::base::Mutex mutex_;
+ PageAllocator& allocator_;
+ FatalOutOfMemoryHandler& oom_handler_;
NormalPageMemoryPool page_pool_;
PageMemoryRegionTree page_memory_region_tree_;
std::vector<std::unique_ptr<PageMemoryRegion>> normal_page_memory_regions_;
@@ -233,8 +241,8 @@ class V8_EXPORT_PRIVATE PageBackend final {
// Returns true if the provided allocator supports committing at the required
// granularity.
-inline bool SupportsCommittingGuardPages(PageAllocator* allocator) {
- return kGuardPageSize % allocator->CommitPageSize() == 0;
+inline bool SupportsCommittingGuardPages(PageAllocator& allocator) {
+ return kGuardPageSize % allocator.CommitPageSize() == 0;
}
Address NormalPageMemoryRegion::Lookup(ConstAddress address) const {
@@ -268,6 +276,7 @@ PageMemoryRegion* PageMemoryRegionTree::Lookup(ConstAddress address) const {
}
Address PageBackend::Lookup(ConstAddress address) const {
+ v8::base::MutexGuard guard(&mutex_);
PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(address);
return pmr ? pmr->Lookup(address) : nullptr;
}
diff --git a/deps/v8/src/heap/cppgc/platform.cc b/deps/v8/src/heap/cppgc/platform.cc
index 90516d6065..fd769ae469 100644
--- a/deps/v8/src/heap/cppgc/platform.cc
+++ b/deps/v8/src/heap/cppgc/platform.cc
@@ -5,10 +5,38 @@
#include "include/cppgc/platform.h"
#include "src/base/lazy-instance.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
#include "src/base/platform/platform.h"
+#include "src/base/sanitizer/asan.h"
#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/platform.h"
namespace cppgc {
+namespace internal {
+
+void Abort() { v8::base::OS::Abort(); }
+
+void FatalOutOfMemoryHandler::operator()(const std::string& reason,
+ const SourceLocation& loc) const {
+ if (custom_handler_) {
+ (*custom_handler_)(reason, loc, heap_);
+ FATAL("Custom out of memory handler should not have returned");
+ }
+#ifdef DEBUG
+ V8_Fatal(loc.FileName(), static_cast<int>(loc.Line()),
+ "Oilpan: Out of memory (%s)", reason.c_str());
+#else // !DEBUG
+ V8_Fatal("Oilpan: Out of memory");
+#endif // !DEBUG
+}
+
+void FatalOutOfMemoryHandler::SetCustomHandler(Callback* callback) {
+ custom_handler_ = callback;
+}
+
+} // namespace internal
namespace {
PageAllocator* g_page_allocator = nullptr;
@@ -20,6 +48,17 @@ TracingController* Platform::GetTracingController() {
}
void InitializeProcess(PageAllocator* page_allocator) {
+#if defined(V8_USE_ADDRESS_SANITIZER) && defined(V8_TARGET_ARCH_64_BIT)
+ // Retrieve ASan's internal shadow memory granularity and check that Oilpan's
+ // object alignments/sizes are a multiple of this granularity. This is needed
+ // to perform poisoning checks.
+ size_t shadow_scale;
+ __asan_get_shadow_mapping(&shadow_scale, nullptr);
+ DCHECK(shadow_scale);
+ const size_t poisoning_granularity = 1 << shadow_scale;
+ CHECK_EQ(0u, internal::kAllocationGranularity % poisoning_granularity);
+#endif
+
CHECK(!g_page_allocator);
internal::GlobalGCInfoTable::Initialize(page_allocator);
g_page_allocator = page_allocator;
@@ -27,9 +66,4 @@ void InitializeProcess(PageAllocator* page_allocator) {
void ShutdownProcess() { g_page_allocator = nullptr; }
-namespace internal {
-
-void Abort() { v8::base::OS::Abort(); }
-
-} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/platform.h b/deps/v8/src/heap/cppgc/platform.h
new file mode 100644
index 0000000000..2fba1ada1b
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/platform.h
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_PLATFORM_H_
+#define V8_HEAP_CPPGC_PLATFORM_H_
+
+#include <string>
+
+#include "include/cppgc/source-location.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+class HeapBase;
+
+class V8_EXPORT_PRIVATE FatalOutOfMemoryHandler final {
+ public:
+ using Callback = void(const std::string&, const SourceLocation&, HeapBase*);
+
+ FatalOutOfMemoryHandler() = default;
+ explicit FatalOutOfMemoryHandler(HeapBase* heap) : heap_(heap) {}
+
+ [[noreturn]] void operator()(
+ const std::string& reason = std::string(),
+ const SourceLocation& = SourceLocation::Current()) const;
+
+ void SetCustomHandler(Callback*);
+
+ // Disallow copy/move.
+ FatalOutOfMemoryHandler(const FatalOutOfMemoryHandler&) = delete;
+ FatalOutOfMemoryHandler& operator=(const FatalOutOfMemoryHandler&) = delete;
+
+ private:
+ HeapBase* heap_ = nullptr;
+ Callback* custom_handler_ = nullptr;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_PLATFORM_H_
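
A short sketch of how the new FatalOutOfMemoryHandler hook could be wired up. ReportAndCrash and InstallOomHandler are hypothetical names; the callback must not return, because operator() falls through to FATAL() if it does:

#include <cstdlib>
#include <string>

#include "src/heap/cppgc/platform.h"  // FatalOutOfMemoryHandler, declared above.

namespace {

[[noreturn]] void ReportAndCrash(const std::string& reason,
                                 const cppgc::SourceLocation& loc,
                                 cppgc::internal::HeapBase* heap) {
  // A real handler would log `reason` and `loc` and dump heap statistics.
  (void)reason;
  (void)loc;
  (void)heap;
  std::abort();  // Must not return.
}

}  // namespace

void InstallOomHandler(cppgc::internal::FatalOutOfMemoryHandler& handler) {
  handler.SetCustomHandler(&ReportAndCrash);
}
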
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
index c05f06f6b0..9f641d6f4b 100644
--- a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -31,7 +31,8 @@ bool PreFinalizerRegistrationDispatcher::PreFinalizer::operator==(
}
PreFinalizerHandler::PreFinalizerHandler(HeapBase& heap)
- : heap_(heap)
+ : current_ordered_pre_finalizers_(&ordered_pre_finalizers_),
+ heap_(heap)
#ifdef DEBUG
,
creation_thread_id_(v8::base::OS::GetCurrentThreadId())
@@ -44,7 +45,10 @@ void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer pre_finalizer) {
DCHECK_EQ(ordered_pre_finalizers_.end(),
std::find(ordered_pre_finalizers_.begin(),
ordered_pre_finalizers_.end(), pre_finalizer));
- ordered_pre_finalizers_.push_back(pre_finalizer);
+ DCHECK_EQ(current_ordered_pre_finalizers_->end(),
+ std::find(current_ordered_pre_finalizers_->begin(),
+ current_ordered_pre_finalizers_->end(), pre_finalizer));
+ current_ordered_pre_finalizers_->push_back(pre_finalizer);
}
void PreFinalizerHandler::InvokePreFinalizers() {
@@ -54,6 +58,13 @@ void PreFinalizerHandler::InvokePreFinalizers() {
DCHECK(CurrentThreadIsCreationThread());
LivenessBroker liveness_broker = LivenessBrokerFactory::Create();
is_invoking_ = true;
+ DCHECK_EQ(0u, bytes_allocated_in_prefinalizers);
+ // Reset all LABs to force allocations to the slow path for black allocation.
+ heap_.object_allocator().ResetLinearAllocationBuffers();
+ // Prefinalizers can allocate other objects with prefinalizers, which will
+ // modify ordered_pre_finalizers_ and break iterators.
+ std::vector<PreFinalizer> new_ordered_pre_finalizers;
+ current_ordered_pre_finalizers_ = &new_ordered_pre_finalizers;
ordered_pre_finalizers_.erase(
ordered_pre_finalizers_.begin(),
std::remove_if(ordered_pre_finalizers_.rbegin(),
@@ -62,6 +73,12 @@ void PreFinalizerHandler::InvokePreFinalizers() {
return (pf.callback)(liveness_broker, pf.object);
})
.base());
+ // Newly added objects with prefinalizers will always survive the current GC
+ // cycle, so it's safe to add them after clearing out the older prefinalizers.
+ ordered_pre_finalizers_.insert(ordered_pre_finalizers_.end(),
+ new_ordered_pre_finalizers.begin(),
+ new_ordered_pre_finalizers.end());
+ current_ordered_pre_finalizers_ = &ordered_pre_finalizers_;
is_invoking_ = false;
ordered_pre_finalizers_.shrink_to_fit();
}
@@ -74,5 +91,11 @@ bool PreFinalizerHandler::CurrentThreadIsCreationThread() {
#endif
}
+void PreFinalizerHandler::NotifyAllocationInPrefinalizer(size_t size) {
+ DCHECK_GT(bytes_allocated_in_prefinalizers + size,
+ bytes_allocated_in_prefinalizers);
+ bytes_allocated_in_prefinalizers += size;
+}
+
} // namespace internal
} // namespace cppgc
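
The current_ordered_pre_finalizers_ indirection above is a re-entrancy pattern: callbacks invoked while iterating a vector may register new entries, and pushing into the vector being iterated would invalidate its iterators, so registrations are redirected to a side vector and spliced back in afterwards. A stripped-down sketch of the same pattern, with a hypothetical Registry class and without the liveness filtering the real handler performs:

#include <functional>
#include <utility>
#include <vector>

class Registry {
 public:
  void Register(std::function<void()> cb) {
    current_->push_back(std::move(cb));
  }

  void InvokeAll() {
    std::vector<std::function<void()>> added_during_invoke;
    current_ = &added_during_invoke;  // Redirect re-entrant Register() calls.
    for (auto& cb : ordered_) cb();   // Safe: ordered_ is not mutated here.
    ordered_.insert(ordered_.end(), added_during_invoke.begin(),
                    added_during_invoke.end());
    current_ = &ordered_;             // Restore the default target.
  }

 private:
  std::vector<std::function<void()>> ordered_;
  std::vector<std::function<void()>>* current_ = &ordered_;
};
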
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.h b/deps/v8/src/heap/cppgc/prefinalizer-handler.h
index e91931bf6f..bc17c99b18 100644
--- a/deps/v8/src/heap/cppgc/prefinalizer-handler.h
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.h
@@ -27,6 +27,11 @@ class PreFinalizerHandler final {
bool IsInvokingPreFinalizers() const { return is_invoking_; }
+ void NotifyAllocationInPrefinalizer(size_t);
+ size_t ExtractBytesAllocatedInPrefinalizers() {
+ return std::exchange(bytes_allocated_in_prefinalizers, 0);
+ }
+
private:
// Checks that the current thread is the thread that created the heap.
bool CurrentThreadIsCreationThread();
@@ -36,12 +41,16 @@ class PreFinalizerHandler final {
// objects) for an object, by processing the ordered_pre_finalizers_
// back-to-front.
std::vector<PreFinalizer> ordered_pre_finalizers_;
+ std::vector<PreFinalizer>* current_ordered_pre_finalizers_;
HeapBase& heap_;
bool is_invoking_ = false;
#ifdef DEBUG
int creation_thread_id_;
#endif
+
+ // Counter of bytes allocated during prefinalizers.
+ size_t bytes_allocated_in_prefinalizers = 0u;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/stats-collector.cc b/deps/v8/src/heap/cppgc/stats-collector.cc
index 54b68f4c28..ce74fe53c8 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.cc
+++ b/deps/v8/src/heap/cppgc/stats-collector.cc
@@ -41,19 +41,19 @@ void StatsCollector::NotifyAllocation(size_t bytes) {
// The current GC may not have been started. This is ok as recording considers
// the whole time range between garbage collections.
allocated_bytes_since_safepoint_ += bytes;
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- DCHECK_GE(live_bytes_ + bytes, live_bytes_);
- live_bytes_ += bytes;
-#endif // CPPGC_VERIFY_LIVE_BYTES
+#ifdef CPPGC_VERIFY_HEAP
+ DCHECK_GE(tracked_live_bytes_ + bytes, tracked_live_bytes_);
+ tracked_live_bytes_ += bytes;
+#endif // CPPGC_VERIFY_HEAP
}
void StatsCollector::NotifyExplicitFree(size_t bytes) {
// See IncreaseAllocatedObjectSize for lifetime of the counter.
explicitly_freed_bytes_since_safepoint_ += bytes;
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- DCHECK_GE(live_bytes_, bytes);
- live_bytes_ -= bytes;
-#endif // CPPGC_VERIFY_LIVE_BYTES
+#ifdef CPPGC_VERIFY_HEAP
+ DCHECK_GE(tracked_live_bytes_, bytes);
+ tracked_live_bytes_ -= bytes;
+#endif // CPPGC_VERIFY_HEAP
}
void StatsCollector::NotifySafePointForConservativeCollection() {
@@ -124,9 +124,9 @@ void StatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
explicitly_freed_bytes_since_safepoint_;
allocated_bytes_since_safepoint_ = 0;
explicitly_freed_bytes_since_safepoint_ = 0;
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- live_bytes_ = marked_bytes;
-#endif // CPPGC_VERIFY_LIVE_BYTES
+#ifdef CPPGC_VERIFY_HEAP
+ tracked_live_bytes_ = marked_bytes;
+#endif // CPPGC_VERIFY_HEAP
DCHECK_LE(memory_freed_bytes_since_end_of_marking_, memory_allocated_bytes_);
memory_allocated_bytes_ -= memory_freed_bytes_since_end_of_marking_;
diff --git a/deps/v8/src/heap/cppgc/stats-collector.h b/deps/v8/src/heap/cppgc/stats-collector.h
index d63d297c77..c3d8dbbfc0 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.h
+++ b/deps/v8/src/heap/cppgc/stats-collector.h
@@ -334,9 +334,10 @@ class V8_EXPORT_PRIVATE StatsCollector final {
// arithmetic for simplicity.
int64_t allocated_bytes_since_safepoint_ = 0;
int64_t explicitly_freed_bytes_since_safepoint_ = 0;
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- size_t live_bytes_ = 0;
-#endif // CPPGC_VERIFY_LIVE_BYTES
+#ifdef CPPGC_VERIFY_HEAP
+ // Tracks live bytes so that under- and overflows of the counters can be
+ // DCHECKed.
+ size_t tracked_live_bytes_ = 0;
+#endif // CPPGC_VERIFY_HEAP
int64_t memory_allocated_bytes_ = 0;
int64_t memory_freed_bytes_since_end_of_marking_ = 0;
diff --git a/deps/v8/src/heap/cppgc/visitor.cc b/deps/v8/src/heap/cppgc/visitor.cc
index e871159b7b..2f786b99ac 100644
--- a/deps/v8/src/heap/cppgc/visitor.cc
+++ b/deps/v8/src/heap/cppgc/visitor.cc
@@ -5,7 +5,9 @@
#include "src/heap/cppgc/visitor.h"
#include "src/base/sanitizer/msan.h"
+#include "src/heap/cppgc/caged-heap.h"
#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/object-view.h"
@@ -50,6 +52,11 @@ void TraceConservatively(ConservativeTracingVisitor* conservative_visitor,
void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
const void* address) {
+#if defined(CPPGC_CAGED_HEAP)
+ // TODO(chromium:1056170): Add support for SIMD in stack scanning.
+ if (V8_LIKELY(!heap_.caged_heap().IsOnHeap(address))) return;
+#endif
+
const BasePage* page = reinterpret_cast<const BasePage*>(
page_backend_.Lookup(static_cast<ConstAddress>(address)));
diff --git a/deps/v8/src/heap/cppgc/write-barrier.cc b/deps/v8/src/heap/cppgc/write-barrier.cc
index 6980e4c893..007abe3005 100644
--- a/deps/v8/src/heap/cppgc/write-barrier.cc
+++ b/deps/v8/src/heap/cppgc/write-barrier.cc
@@ -132,12 +132,12 @@ void WriteBarrier::GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
// A write during atomic pause (e.g. pre-finalizer) may trigger the slow path
// of the barrier. This is a result of the order of bailouts where not marking
// results in applying the generational barrier.
- if (local_data.heap_base->in_atomic_pause()) return;
+ if (local_data.heap_base.in_atomic_pause()) return;
if (value_offset > 0 && age_table[value_offset] == AgeTable::Age::kOld)
return;
// Record slot.
- local_data.heap_base->remembered_slots().insert(const_cast<void*>(slot));
+ local_data.heap_base.remembered_slots().insert(const_cast<void*>(slot));
}
#endif // CPPGC_YOUNG_GENERATION
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index befb1a7e7a..1f15a7e826 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -6,7 +6,8 @@
#define V8_HEAP_EMBEDDER_TRACING_H_
#include "include/v8-cppgc.h"
-#include "include/v8.h"
+#include "include/v8-embedder-heap.h"
+#include "include/v8-traced-handle.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index 1e197b9302..2547d40f0c 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -809,8 +809,7 @@ HeapObject FactoryBase<Impl>::AllocateRawArray(int size,
(size >
isolate()->heap()->AsHeap()->MaxRegularHeapObjectSize(allocation)) &&
FLAG_use_marking_progress_bar) {
- BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
- chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
+ LargePage::FromHeapObject(result)->ProgressBar().Enable();
}
return result;
}
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index 72d53014fd..b64db0abf9 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -71,6 +71,15 @@ ReadOnlyRoots Factory::read_only_roots() const {
return ReadOnlyRoots(isolate());
}
+Factory::CodeBuilder& Factory::CodeBuilder::set_interpreter_data(
+ Handle<HeapObject> interpreter_data) {
+ // This DCHECK requires this function to be in -inl.h.
+ DCHECK(interpreter_data->IsInterpreterData() ||
+ interpreter_data->IsBytecodeArray());
+ interpreter_data_ = interpreter_data;
+ return *this;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 0c80e81f51..e995a49897 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -100,14 +100,15 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
kind_specific_flags_ == 0
? roots.trampoline_trivial_code_data_container_handle()
: roots.trampoline_promise_rejection_code_data_container_handle());
- DCHECK_EQ(canonical_code_data_container->kind_specific_flags(),
+ DCHECK_EQ(canonical_code_data_container->kind_specific_flags(kRelaxedLoad),
kind_specific_flags_);
data_container = canonical_code_data_container;
} else {
data_container = factory->NewCodeDataContainer(
0, read_only_data_container_ ? AllocationType::kReadOnly
: AllocationType::kOld);
- data_container->set_kind_specific_flags(kind_specific_flags_);
+ data_container->set_kind_specific_flags(kind_specific_flags_,
+ kRelaxedStore);
}
// Basic block profiling data for builtins is stored in the JS heap rather
@@ -161,10 +162,11 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
// passing IsPendingAllocation).
raw_code.set_inlined_bytecode_size(inlined_bytecode_size_);
raw_code.set_code_data_container(*data_container, kReleaseStore);
- raw_code.set_deoptimization_data(*deoptimization_data_);
if (kind_ == CodeKind::BASELINE) {
+ raw_code.set_bytecode_or_interpreter_data(*interpreter_data_);
raw_code.set_bytecode_offset_table(*position_table_);
} else {
+ raw_code.set_deoptimization_data(*deoptimization_data_);
raw_code.set_source_position_table(*position_table_);
}
raw_code.set_handler_table_offset(
@@ -312,7 +314,8 @@ void Factory::CodeBuilder::FinalizeOnHeapCode(Handle<Code> code,
Code::SizeFor(code_desc_.instruction_size() + code_desc_.metadata_size());
int size_to_trim = old_object_size - new_object_size;
DCHECK_GE(size_to_trim, 0);
- heap->UndoLastAllocationAt(code->address() + new_object_size, size_to_trim);
+ heap->CreateFillerObjectAt(code->address() + new_object_size, size_to_trim,
+ ClearRecordedSlots::kNo);
}
MaybeHandle<Code> Factory::NewEmptyCode(CodeKind kind, int buffer_size) {
@@ -456,16 +459,6 @@ Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
return handle(result, isolate());
}
-Handle<BaselineData> Factory::NewBaselineData(
- Handle<Code> code, Handle<HeapObject> function_data) {
- auto baseline_data =
- NewStructInternal<BaselineData>(BASELINE_DATA_TYPE, AllocationType::kOld);
- DisallowGarbageCollection no_gc;
- baseline_data.set_baseline_code(*code);
- baseline_data.set_data(*function_data);
- return handle(baseline_data, isolate());
-}
-
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number,
const char* type_of, byte kind) {
@@ -512,8 +505,7 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(
if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
if ((size > heap->MaxRegularHeapObjectSize(allocation_type)) &&
FLAG_use_marking_progress_bar) {
- BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
- chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
+ LargePage::FromHeapObject(result)->ProgressBar().Enable();
}
DisallowGarbageCollection no_gc;
result.set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
@@ -2178,7 +2170,7 @@ Handle<CodeDataContainer> Factory::NewCodeDataContainer(
CodeDataContainer::cast(New(code_data_container_map(), allocation));
DisallowGarbageCollection no_gc;
data_container.set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
- data_container.set_kind_specific_flags(flags);
+ data_container.set_kind_specific_flags(flags, kRelaxedStore);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
data_container.AllocateExternalPointerEntries(isolate());
data_container.set_raw_code(Smi::zero(), SKIP_WRITE_BARRIER);
@@ -2198,7 +2190,7 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Builtins::CodeObjectIsExecutable(code->builtin_id());
Handle<Code> result = Builtins::GenerateOffHeapTrampolineFor(
isolate(), off_heap_entry,
- code->code_data_container(kAcquireLoad).kind_specific_flags(),
+ code->code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad),
generate_jump_to_instruction_stream);
// Trampolines may not contain any metadata since all metadata offsets,
@@ -2256,7 +2248,7 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Handle<Code> Factory::CopyCode(Handle<Code> code) {
Handle<CodeDataContainer> data_container = NewCodeDataContainer(
- code->code_data_container(kAcquireLoad).kind_specific_flags(),
+ code->code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad),
AllocationType::kOld);
Heap* heap = isolate()->heap();
@@ -2872,7 +2864,6 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
map, empty_byte_array(), buffer, byte_offset, byte_length));
JSTypedArray raw = *typed_array;
DisallowGarbageCollection no_gc;
- raw.AllocateExternalPointerEntries(isolate());
raw.set_length(length);
raw.SetOffHeapDataPtr(isolate(), buffer->backing_store(), byte_offset);
raw.set_is_length_tracking(false);
@@ -2887,7 +2878,6 @@ Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
isolate());
Handle<JSDataView> obj = Handle<JSDataView>::cast(NewJSArrayBufferView(
map, empty_fixed_array(), buffer, byte_offset, byte_length));
- obj->AllocateExternalPointerEntries(isolate());
obj->set_data_pointer(
isolate(), static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
return obj;
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 1acf9a65c2..355a8d5d6e 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -116,9 +116,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return handle(obj, isolate());
}
- Handle<BaselineData> NewBaselineData(Handle<Code> code,
- Handle<HeapObject> function_data);
-
Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number, const char* type_of,
byte kind);
@@ -884,11 +881,15 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
CodeBuilder& set_deoptimization_data(
Handle<DeoptimizationData> deopt_data) {
+ DCHECK_NE(kind_, CodeKind::BASELINE);
DCHECK(!deopt_data.is_null());
deoptimization_data_ = deopt_data;
return *this;
}
+ inline CodeBuilder& set_interpreter_data(
+ Handle<HeapObject> interpreter_data);
+
CodeBuilder& set_is_turbofanned() {
DCHECK(!CodeKindIsUnoptimizedJSFunction(kind_));
is_turbofanned_ = true;
@@ -943,6 +944,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<ByteArray> position_table_;
Handle<DeoptimizationData> deoptimization_data_ =
DeoptimizationData::Empty(isolate_);
+ Handle<HeapObject> interpreter_data_;
BasicBlockProfilerData* profiler_data_ = nullptr;
bool is_executable_ = true;
bool read_only_data_container_ = false;
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 982b80bb89..0d0c4935a3 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -653,8 +653,8 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
// clang-format off
#define DICT(s) "{" << s << "}"
#define LIST(s) "[" << s << "]"
-#define ESCAPE(s) "\"" << s << "\""
-#define MEMBER(s) ESCAPE(s) << ":"
+#define QUOTE(s) "\"" << s << "\""
+#define MEMBER(s) QUOTE(s) << ":"
auto SpaceStatistics = [this](int space_index) {
HeapSpaceStatistics space_stats;
@@ -663,7 +663,7 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
std::stringstream stream;
stream << DICT(
MEMBER("name")
- << ESCAPE(BaseSpace::GetSpaceName(
+ << QUOTE(BaseSpace::GetSpaceName(
static_cast<AllocationSpace>(space_index)))
<< ","
MEMBER("size") << space_stats.space_size() << ","
@@ -674,7 +674,7 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
};
stream << DICT(
- MEMBER("isolate") << ESCAPE(reinterpret_cast<void*>(isolate())) << ","
+ MEMBER("isolate") << QUOTE(reinterpret_cast<void*>(isolate())) << ","
MEMBER("id") << gc_count() << ","
MEMBER("time_ms") << isolate()->time_millis_since_init() << ","
MEMBER("total_heap_size") << stats.total_heap_size() << ","
@@ -699,7 +699,7 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
#undef DICT
#undef LIST
-#undef ESCAPE
+#undef QUOTE
#undef MEMBER
// clang-format on
}
@@ -1929,14 +1929,7 @@ void Heap::StartIncrementalMarking(int gc_flags,
}
void Heap::CompleteSweepingFull() {
- TRACE_GC_EPOCH(tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
- ThreadKind::kMain);
-
- {
- TRACE_GC(tracer(), GCTracer::Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS);
- array_buffer_sweeper()->EnsureFinished();
- }
-
+ array_buffer_sweeper()->EnsureFinished();
mark_compact_collector()->EnsureSweepingCompleted();
DCHECK(!mark_compact_collector()->sweeping_in_progress());
}
@@ -3476,15 +3469,6 @@ void Heap::RightTrimWeakFixedArray(WeakFixedArray object,
elements_to_trim * kTaggedSize);
}
-void Heap::UndoLastAllocationAt(Address addr, int size) {
- DCHECK_LE(0, size);
- if (size == 0) return;
- if (code_space_->TryFreeLast(addr, size)) {
- return;
- }
- CreateFillerObjectAt(addr, size, ClearRecordedSlots::kNo);
-}
-
template <typename T>
void Heap::CreateFillerForArray(T object, int elements_to_trim,
int bytes_to_trim) {
@@ -7171,7 +7155,7 @@ void Heap::WriteBarrierForRange(HeapObject object, TSlot start_slot,
if (incremental_marking()->IsMarking()) {
mode |= kDoMarking;
- if (!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
+ if (!source_page->ShouldSkipEvacuationSlotRecording()) {
mode |= kDoEvacuationSlotRecording;
}
}
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 61dea819f0..e2e6316ef5 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -15,8 +15,10 @@
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
+#include "include/v8-callbacks.h"
+#include "include/v8-embedder-heap.h"
#include "include/v8-internal.h"
-#include "include/v8.h"
+#include "include/v8-isolate.h"
#include "src/base/atomic-utils.h"
#include "src/base/enum-set.h"
#include "src/base/platform/condition-variable.h"
@@ -577,8 +579,6 @@ class Heap {
int elements_to_trim);
void RightTrimWeakFixedArray(WeakFixedArray obj, int elements_to_trim);
- void UndoLastAllocationAt(Address addr, int size);
-
// Converts the given boolean condition to JavaScript boolean value.
inline Oddball ToBoolean(bool condition);
diff --git a/deps/v8/src/heap/large-spaces.cc b/deps/v8/src/heap/large-spaces.cc
index 1736fee60d..6cc5a4a868 100644
--- a/deps/v8/src/heap/large-spaces.cc
+++ b/deps/v8/src/heap/large-spaces.cc
@@ -230,7 +230,7 @@ void OldLargeObjectSpace::ClearMarkingStateOfLiveObjects() {
Marking::MarkWhite(marking_state->MarkBitFrom(obj));
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
- chunk->ResetProgressBar();
+ chunk->ProgressBar().ResetIfEnabled();
marking_state->SetLiveBytes(chunk, 0);
}
DCHECK(marking_state->IsWhite(obj));
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 2210c73958..47865a6cc7 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -68,7 +68,7 @@ void MarkCompactCollector::RecordSlot(HeapObject object, ObjectSlot slot,
void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
HeapObject target) {
MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
- if (!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
+ if (!source_page->ShouldSkipEvacuationSlotRecording()) {
RecordSlot(source_page, slot, target);
}
}
@@ -76,7 +76,7 @@ void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
void MarkCompactCollector::RecordSlot(MemoryChunk* source_page,
HeapObjectSlot slot, HeapObject target) {
BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(target);
- if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>()) {
+ if (target_page->IsEvacuationCandidate()) {
if (V8_EXTERNAL_CODE_SPACE_BOOL &&
target_page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
RememberedSet<OLD_TO_CODE>::Insert<AccessMode::ATOMIC>(source_page,
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 0fffb4ea45..83983ae820 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -646,6 +646,9 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
void MarkCompactCollector::EnsureSweepingCompleted() {
if (!sweeper()->sweeping_in_progress()) return;
+ TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
+ ThreadKind::kMain);
+
sweeper()->EnsureCompleted();
heap()->old_space()->RefillFreeList();
heap()->code_space()->RefillFreeList();
@@ -1707,9 +1710,8 @@ void MarkCompactCollector::VisitObject(HeapObject obj) {
void MarkCompactCollector::RevisitObject(HeapObject obj) {
DCHECK(marking_state()->IsBlack(obj));
- DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->IsFlagSet(
- MemoryChunk::HAS_PROGRESS_BAR),
- 0u == MemoryChunk::FromHeapObject(obj)->ProgressBar());
+ DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->ProgressBar().IsEnabled(),
+ 0u == MemoryChunk::FromHeapObject(obj)->ProgressBar().Value());
MarkingVisitor::RevisitScope revisit(marking_visitor_.get());
marking_visitor_->Visit(obj.map(), obj);
}
@@ -2368,23 +2370,6 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
DCHECK(!shared_info.is_compiled());
}
-void MarkCompactCollector::MarkBaselineDataAsLive(BaselineData baseline_data) {
- if (non_atomic_marking_state()->IsBlackOrGrey(baseline_data)) return;
-
- // Mark baseline data as live.
- non_atomic_marking_state()->WhiteToBlack(baseline_data);
-
- // Record object slots.
- DCHECK(
- non_atomic_marking_state()->IsBlackOrGrey(baseline_data.baseline_code()));
- ObjectSlot code = baseline_data.RawField(BaselineData::kBaselineCodeOffset);
- RecordSlot(baseline_data, code, HeapObject::cast(*code));
-
- DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_data.data()));
- ObjectSlot data = baseline_data.RawField(BaselineData::kDataOffset);
- RecordSlot(baseline_data, data, HeapObject::cast(*data));
-}
-
void MarkCompactCollector::ProcessOldCodeCandidates() {
DCHECK(FLAG_flush_bytecode || FLAG_flush_baseline_code ||
weak_objects_.code_flushing_candidates.IsEmpty());
@@ -2393,10 +2378,12 @@ void MarkCompactCollector::ProcessOldCodeCandidates() {
&flushing_candidate)) {
bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
flushing_candidate.GetBytecodeArray(isolate()));
- if (FLAG_flush_baseline_code && flushing_candidate.HasBaselineData()) {
- BaselineData baseline_data = flushing_candidate.baseline_data();
- if (non_atomic_marking_state()->IsBlackOrGrey(
- baseline_data.baseline_code())) {
+ if (FLAG_flush_baseline_code && flushing_candidate.HasBaselineCode()) {
+ CodeT baseline_codet =
+ CodeT::cast(flushing_candidate.function_data(kAcquireLoad));
+ // Safe to do a relaxed load here since the CodeT was acquire-loaded.
+ Code baseline_code = FromCodeT(baseline_codet, kRelaxedLoad);
+ if (non_atomic_marking_state()->IsBlackOrGrey(baseline_code)) {
// Currently baseline code holds bytecode array strongly and it is
// always ensured that bytecode is live if baseline code is live. Hence
// baseline code can safely load bytecode array without any additional
@@ -2404,19 +2391,23 @@ void MarkCompactCollector::ProcessOldCodeCandidates() {
// flush code if the bytecode is not live and also update baseline code
// to bailout if there is no bytecode.
DCHECK(is_bytecode_live);
- MarkBaselineDataAsLive(baseline_data);
+
+ // Regardless of whether the CodeT is a CodeDataContainer or the Code
+ // itself, if the Code is live then the CodeT has to be live and will
+ // have been marked via the owning JSFunction.
+ DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_codet));
} else if (is_bytecode_live) {
// If baseline code is flushed but we have a valid bytecode array reset
- // the function_data field to BytecodeArray.
- flushing_candidate.set_function_data(baseline_data.data(),
- kReleaseStore);
+ // the function_data field to the BytecodeArray/InterpreterData.
+ flushing_candidate.set_function_data(
+ baseline_code.bytecode_or_interpreter_data(), kReleaseStore);
}
}
if (!is_bytecode_live) {
// If baseline code flushing is disabled we should only flush bytecode
// from functions that don't have baseline data.
- DCHECK(FLAG_flush_baseline_code || !flushing_candidate.HasBaselineData());
+ DCHECK(FLAG_flush_baseline_code || !flushing_candidate.HasBaselineCode());
// If the BytecodeArray is dead, flush it, which will replace the field
// with an uncompiled data object.
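
Side note on the acquire/relaxed pairing used in the baseline-code flushing hunk above: once the function_data slot has been acquire-loaded, everything written before the matching release store is visible, so follow-up loads through the resulting object can be relaxed. A minimal, self-contained C++ sketch of that publication pattern (illustrative names, not V8 code):

#include <atomic>
#include <cassert>
#include <thread>

struct Payload {
  int bytecode_length = 0;  // stands in for data hanging off the Code object
};

std::atomic<Payload*> slot{nullptr};  // stands in for the function_data slot

void Publisher() {
  Payload* p = new Payload;
  p->bytecode_length = 42;                   // initialize the object first...
  slot.store(p, std::memory_order_release);  // ...then publish with release
}

void Consumer() {
  Payload* p;
  while ((p = slot.load(std::memory_order_acquire)) == nullptr) {
  }
  // The acquire load synchronizes with the release store, so plain or relaxed
  // reads through p are guaranteed to see the fully initialized payload.
  assert(p->bytecode_length == 42);
  delete p;
}

int main() {
  std::thread t1(Publisher), t2(Consumer);
  t1.join();
  t2.join();
}
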
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 9ce993898c..8be25e0914 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -670,10 +670,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Flushes a weakly held bytecode array from a shared function info.
void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);
- // Marks the BaselineData as live and records the slots of baseline data
- // fields. This assumes that the objects in the data fields are alive.
- void MarkBaselineDataAsLive(BaselineData baseline_data);
-
// Clears bytecode arrays / baseline code that have not been executed for
// multiple collections.
void ProcessOldCodeCandidates();
diff --git a/deps/v8/src/heap/marking-visitor-inl.h b/deps/v8/src/heap/marking-visitor-inl.h
index fe8661c516..39d446aa3a 100644
--- a/deps/v8/src/heap/marking-visitor-inl.h
+++ b/deps/v8/src/heap/marking-visitor-inl.h
@@ -8,6 +8,7 @@
#include "src/heap/marking-visitor.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/progress-bar.h"
#include "src/heap/spaces.h"
#include "src/objects/objects.h"
#include "src/objects/smi.h"
@@ -185,11 +186,13 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitSharedFunctionInfo(
// If bytecode flushing is disabled but baseline code flushing is enabled
// then we have to visit the bytecode but not the baseline code.
DCHECK(IsBaselineCodeFlushingEnabled(code_flush_mode_));
- BaselineData baseline_data =
- BaselineData::cast(shared_info.function_data(kAcquireLoad));
- // Visit the bytecode hanging off baseline data.
- VisitPointer(baseline_data,
- baseline_data.RawField(BaselineData::kDataOffset));
+ CodeT baseline_codet = CodeT::cast(shared_info.function_data(kAcquireLoad));
+ // Safe to do a relaxed load here since the CodeT was acquire-loaded.
+ Code baseline_code = FromCodeT(baseline_codet, kRelaxedLoad);
+ // Visit the bytecode hanging off baseline code.
+ VisitPointer(baseline_code,
+ baseline_code.RawField(
+ Code::kDeoptimizationDataOrInterpreterDataOffset));
weak_objects_->code_flushing_candidates.Push(task_id_, shared_info);
} else {
// In other cases, record as a flushing candidate since we have old
@@ -206,13 +209,13 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitSharedFunctionInfo(
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
VisitFixedArrayWithProgressBar(Map map, FixedArray object,
- MemoryChunk* chunk) {
+ ProgressBar& progress_bar) {
const int kProgressBarScanningChunk = kMaxRegularHeapObjectSize;
STATIC_ASSERT(kMaxRegularHeapObjectSize % kTaggedSize == 0);
DCHECK(concrete_visitor()->marking_state()->IsBlackOrGrey(object));
concrete_visitor()->marking_state()->GreyToBlack(object);
int size = FixedArray::BodyDescriptor::SizeOf(map, object);
- size_t current_progress_bar = chunk->ProgressBar();
+ size_t current_progress_bar = progress_bar.Value();
int start = static_cast<int>(current_progress_bar);
if (start == 0) {
this->VisitMapPointer(object);
@@ -221,7 +224,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
int end = std::min(size, start + kProgressBarScanningChunk);
if (start < end) {
VisitPointers(object, object.RawField(start), object.RawField(end));
- bool success = chunk->TrySetProgressBar(current_progress_bar, end);
+ bool success = progress_bar.TrySetNewValue(current_progress_bar, end);
CHECK(success);
if (end < size) {
// The object can be pushed back onto the marking worklist only after
@@ -237,9 +240,10 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitFixedArray(
Map map, FixedArray object) {
// Arrays with the progress bar are not left-trimmable because they reside
// in the large object space.
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
- return chunk->IsFlagSet<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR)
- ? VisitFixedArrayWithProgressBar(map, object, chunk)
+ ProgressBar& progress_bar =
+ MemoryChunk::FromHeapObject(object)->ProgressBar();
+ return progress_bar.IsEnabled()
+ ? VisitFixedArrayWithProgressBar(map, object, progress_bar)
: concrete_visitor()->VisitLeftTrimmableArray(map, object);
}
diff --git a/deps/v8/src/heap/marking-visitor.h b/deps/v8/src/heap/marking-visitor.h
index 555b2e8118..6a016a143e 100644
--- a/deps/v8/src/heap/marking-visitor.h
+++ b/deps/v8/src/heap/marking-visitor.h
@@ -193,7 +193,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
template <typename T>
int VisitEmbedderTracingSubclass(Map map, T object);
V8_INLINE int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
- MemoryChunk* chunk);
+ ProgressBar& progress_bar);
// Marks the descriptor array black without pushing it on the marking work
// list and visits its header. Returns the size of the descriptor array
// if it was successfully marked as black.
diff --git a/deps/v8/src/heap/memory-chunk-layout.h b/deps/v8/src/heap/memory-chunk-layout.h
index f37583ab42..1b958f0cbf 100644
--- a/deps/v8/src/heap/memory-chunk-layout.h
+++ b/deps/v8/src/heap/memory-chunk-layout.h
@@ -7,6 +7,7 @@
#include "src/heap/heap.h"
#include "src/heap/list.h"
+#include "src/heap/progress-bar.h"
#include "src/heap/slot-set.h"
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
@@ -50,7 +51,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
FIELD(VirtualMemory, Reservation),
// MemoryChunk fields:
FIELD(SlotSet* [kNumSets], SlotSet),
- FIELD(std::atomic<size_t>, ProgressBar),
+ FIELD(ProgressBar, ProgressBar),
FIELD(std::atomic<intptr_t>, LiveByteCount),
FIELD(SlotSet*, SweepingSlotSet),
FIELD(TypedSlotsSet* [kNumSets], TypedSlotSet),
diff --git a/deps/v8/src/heap/memory-chunk.cc b/deps/v8/src/heap/memory-chunk.cc
index 0d9afdb1c7..29dbf74934 100644
--- a/deps/v8/src/heap/memory-chunk.cc
+++ b/deps/v8/src/heap/memory-chunk.cc
@@ -130,7 +130,7 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
// Not actually used but initialize anyway for predictability.
chunk->invalidated_slots_[OLD_TO_CODE] = nullptr;
}
- chunk->progress_bar_ = 0;
+ chunk->progress_bar_.Initialize();
chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
chunk->page_protection_change_mutex_ = new base::Mutex();
chunk->write_unprotect_counter_ = 0;
diff --git a/deps/v8/src/heap/memory-chunk.h b/deps/v8/src/heap/memory-chunk.h
index 66196c1f13..ad9ac72f83 100644
--- a/deps/v8/src/heap/memory-chunk.h
+++ b/deps/v8/src/heap/memory-chunk.h
@@ -162,22 +162,10 @@ class MemoryChunk : public BasicMemoryChunk {
// Approximate amount of physical memory committed for this chunk.
V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
- size_t ProgressBar() {
- DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
- return progress_bar_.load(std::memory_order_acquire);
- }
-
- bool TrySetProgressBar(size_t old_value, size_t new_value) {
- DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
- return progress_bar_.compare_exchange_strong(old_value, new_value,
- std::memory_order_acq_rel);
- }
-
- void ResetProgressBar() {
- if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- progress_bar_.store(0, std::memory_order_release);
- }
+ class ProgressBar& ProgressBar() {
+ return progress_bar_;
}
+ const class ProgressBar& ProgressBar() const { return progress_bar_; }
inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount);
@@ -256,9 +244,9 @@ class MemoryChunk : public BasicMemoryChunk {
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
- // Used by the incremental marker to keep track of the scanning progress in
- // large objects that have a progress bar and are scanned in increments.
- std::atomic<size_t> progress_bar_;
+ // Used by the marker to keep track of the scanning progress in large objects
+ // that have a progress bar and are scanned in increments.
+ class ProgressBar progress_bar_;
// Count of bytes marked black on page.
std::atomic<intptr_t> live_byte_count_;
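
A note on the "class ProgressBar&" spelling in the memory-chunk.h hunk above: the accessor shares its name with the type, and inside MemoryChunk the member-function name hides the class name, so the elaborated form "class ProgressBar" is needed to refer to the type. A tiny sketch of the same situation (Chunk is an illustrative stand-in):

class ProgressBar { /* ... */ };

class Chunk {
 public:
  // "class ProgressBar" names the type; a bare "ProgressBar" here would refer
  // to this member function (the function name hides the class name inside
  // Chunk), so the elaborated-type-specifier is required.
  class ProgressBar& ProgressBar() { return progress_bar_; }
  const class ProgressBar& ProgressBar() const { return progress_bar_; }

 private:
  class ProgressBar progress_bar_;  // same elaborated form for the member
};
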
diff --git a/deps/v8/src/heap/memory-measurement.cc b/deps/v8/src/heap/memory-measurement.cc
index a29ffb10e1..87cfb06faf 100644
--- a/deps/v8/src/heap/memory-measurement.cc
+++ b/deps/v8/src/heap/memory-measurement.cc
@@ -4,7 +4,7 @@
#include "src/heap/memory-measurement.h"
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/api/api-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/factory-inl.h"
diff --git a/deps/v8/src/heap/memory-measurement.h b/deps/v8/src/heap/memory-measurement.h
index cf72c57abd..2b5377943c 100644
--- a/deps/v8/src/heap/memory-measurement.h
+++ b/deps/v8/src/heap/memory-measurement.h
@@ -8,6 +8,7 @@
#include <list>
#include <unordered_map>
+#include "include/v8-statistics.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/utils/random-number-generator.h"
#include "src/common/globals.h"
diff --git a/deps/v8/src/heap/new-spaces.cc b/deps/v8/src/heap/new-spaces.cc
index d08fe48f23..b935a585bc 100644
--- a/deps/v8/src/heap/new-spaces.cc
+++ b/deps/v8/src/heap/new-spaces.cc
@@ -57,7 +57,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
memory_chunk_list_.Remove(current_page);
// Clear new space flags to avoid this page being treated as a new
// space page that is potentially being swept.
- current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
+ current_page->ClearFlags(Page::kIsInYoungGenerationMask);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
current_page);
current_page = next_current;
@@ -76,8 +76,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
DCHECK_NOT_NULL(current_page);
memory_chunk_list_.PushBack(current_page);
marking_state->ClearLiveness(current_page);
- current_page->SetFlags(first_page()->GetFlags(),
- static_cast<uintptr_t>(Page::kCopyAllFlags));
+ current_page->SetFlags(first_page()->GetFlags(), Page::kAllFlagsMask);
heap()->CreateFillerObjectAt(current_page->area_start(),
static_cast<int>(current_page->area_size()),
ClearRecordedSlots::kNo);
@@ -214,7 +213,8 @@ void SemiSpace::ShrinkTo(size_t new_capacity) {
target_capacity_ = new_capacity;
}
-void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
+void SemiSpace::FixPagesFlags(Page::MainThreadFlags flags,
+ Page::MainThreadFlags mask) {
for (Page* page : *this) {
page->set_owner(this);
page->SetFlags(flags, mask);
@@ -253,8 +253,7 @@ void SemiSpace::RemovePage(Page* page) {
}
void SemiSpace::PrependPage(Page* page) {
- page->SetFlags(current_page()->GetFlags(),
- static_cast<uintptr_t>(Page::kCopyAllFlags));
+ page->SetFlags(current_page()->GetFlags(), Page::kAllFlagsMask);
page->set_owner(this);
memory_chunk_list_.PushFront(page);
current_capacity_ += Page::kPageSize;
@@ -276,7 +275,7 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
DCHECK(from->first_page());
DCHECK(to->first_page());
- intptr_t saved_to_space_flags = to->current_page()->GetFlags();
+ auto saved_to_space_flags = to->current_page()->GetFlags();
// We swap all properties but id_.
std::swap(from->target_capacity_, to->target_capacity_);
@@ -289,7 +288,7 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
to->external_backing_store_bytes_);
to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
- from->FixPagesFlags(0, 0);
+ from->FixPagesFlags(Page::NO_FLAGS, Page::NO_FLAGS);
}
void SemiSpace::set_age_mark(Address mark) {
diff --git a/deps/v8/src/heap/new-spaces.h b/deps/v8/src/heap/new-spaces.h
index 7f6f46c78b..45129acea1 100644
--- a/deps/v8/src/heap/new-spaces.h
+++ b/deps/v8/src/heap/new-spaces.h
@@ -173,7 +173,7 @@ class SemiSpace : public Space {
void RewindPages(int num_pages);
// Copies the flags into the masked positions on all pages in the space.
- void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
+ void FixPagesFlags(Page::MainThreadFlags flags, Page::MainThreadFlags mask);
// The currently committed space capacity.
size_t current_capacity_;
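
The SemiSpace hunks above replace raw intptr_t masks with the typed Page::MainThreadFlags value (NO_FLAGS, kAllFlagsMask, and the kCopyOnFlipFlagsMask defined further down in spaces.h), so SetFlags/ClearFlags can no longer be fed stray integers. A generic sketch of such a type-safe flag set; this is not V8's base::Flags, only an illustration of the idea:

#include <cstdint>

enum ChunkFlag : uint32_t {
  NO_FLAGS = 0,
  POINTERS_TO_HERE_ARE_INTERESTING = 1u << 0,
  POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 1,
  INCREMENTAL_MARKING = 1u << 2,
};

class ChunkFlags {
 public:
  constexpr ChunkFlags() = default;
  constexpr explicit ChunkFlags(ChunkFlag f) : bits_(f) {}
  constexpr ChunkFlags operator|(ChunkFlags o) const {
    return ChunkFlags(bits_ | o.bits_);
  }
  constexpr bool contains(ChunkFlag f) const { return (bits_ & f) != 0; }

 private:
  constexpr explicit ChunkFlags(uint32_t bits) : bits_(bits) {}
  uint32_t bits_ = 0;
};

// Masks become typed values instead of static_cast<uintptr_t>(~0) and friends:
constexpr ChunkFlags kCopyOnFlipFlagsMask =
    ChunkFlags(POINTERS_TO_HERE_ARE_INTERESTING) |
    ChunkFlags(POINTERS_FROM_HERE_ARE_INTERESTING) |
    ChunkFlags(INCREMENTAL_MARKING);
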
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index a33844743f..e3514c51fa 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -26,7 +26,7 @@ struct WeakListVisitor;
template <class T>
Object VisitWeakList(Heap* heap, Object list, WeakObjectRetainer* retainer) {
- Object undefined = ReadOnlyRoots(heap).undefined_value();
+ HeapObject undefined = ReadOnlyRoots(heap).undefined_value();
Object head = undefined;
T tail;
bool record_slots = MustRecordSlots(heap);
@@ -47,7 +47,7 @@ Object VisitWeakList(Heap* heap, Object list, WeakObjectRetainer* retainer) {
} else {
// Subsequent elements in the list.
DCHECK(!tail.is_null());
- WeakListVisitor<T>::SetWeakNext(tail, retained);
+ WeakListVisitor<T>::SetWeakNext(tail, HeapObject::cast(retained));
if (record_slots) {
HeapObject slot_holder = WeakListVisitor<T>::WeakNextHolder(tail);
int slot_offset = WeakListVisitor<T>::WeakNextOffset();
@@ -187,7 +187,7 @@ struct WeakListVisitor<AllocationSite> {
template <>
struct WeakListVisitor<JSFinalizationRegistry> {
- static void SetWeakNext(JSFinalizationRegistry obj, Object next) {
+ static void SetWeakNext(JSFinalizationRegistry obj, HeapObject next) {
obj.set_next_dirty(next, UPDATE_WEAK_WRITE_BARRIER);
}
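
The VisitWeakList hunks above follow the usual weak-list pruning shape: walk the chain, ask the retainer whether each element survives, and relink the survivors (slot recording for the write barrier is omitted here). A generic sketch of that shape with plain pointers instead of tagged objects (Node and the retainer callback are illustrative, not V8 types):

#include <functional>

struct Node {
  int value = 0;
  Node* weak_next = nullptr;
};

// 'retainer' returns the (possibly moved) node if it is still alive, or
// nullptr if it should be dropped, analogous to WeakObjectRetainer::RetainAs.
Node* PruneWeakList(Node* list, const std::function<Node*(Node*)>& retainer) {
  Node* head = nullptr;
  Node* tail = nullptr;
  for (Node* cur = list; cur != nullptr; cur = cur->weak_next) {
    Node* retained = retainer(cur);
    if (retained == nullptr) continue;  // dead: drop it by skipping it
    if (tail == nullptr) {
      head = retained;             // first live element becomes the new head
    } else {
      tail->weak_next = retained;  // subsequent elements: fix up the tail link
    }
    tail = retained;
  }
  if (tail != nullptr) tail->weak_next = nullptr;  // terminate the list
  return head;
}
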
diff --git a/deps/v8/src/heap/progress-bar.h b/deps/v8/src/heap/progress-bar.h
new file mode 100644
index 0000000000..b00558b684
--- /dev/null
+++ b/deps/v8/src/heap/progress-bar.h
@@ -0,0 +1,61 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_PROGRESS_BAR_H_
+#define V8_HEAP_PROGRESS_BAR_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+// The progress bar allows for keeping track of the bytes processed of a single
+// object. The progress bar itself must be enabled before it's used.
+//
+// Only large objects use the progress bar which is stored in their page header.
+// These objects are scanned in increments and will be kept black while being
+// scanned. Even if the mutator writes to them, they will be kept black and a
+// white-to-grey transition is performed on the written value.
+//
+// The progress bar starts as disabled. After enabling (through `Enable()`), it
+// can never be disabled again.
+class ProgressBar final {
+ public:
+ void Initialize() { value_ = kDisabledSentinel; }
+ void Enable() { value_ = 0; }
+ bool IsEnabled() const {
+ return value_.load(std::memory_order_acquire) != kDisabledSentinel;
+ }
+
+ size_t Value() const {
+ DCHECK(IsEnabled());
+ return value_.load(std::memory_order_acquire);
+ }
+
+ bool TrySetNewValue(size_t old_value, size_t new_value) {
+ DCHECK(IsEnabled());
+ DCHECK_NE(kDisabledSentinel, new_value);
+ return value_.compare_exchange_strong(old_value, new_value,
+ std::memory_order_acq_rel);
+ }
+
+ void ResetIfEnabled() {
+ if (IsEnabled()) {
+ value_.store(0, std::memory_order_release);
+ }
+ }
+
+ private:
+ static constexpr size_t kDisabledSentinel = SIZE_MAX;
+
+ std::atomic<size_t> value_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_PROGRESS_BAR_H_
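
The new ProgressBar is a small three-state value: Initialize() stores the disabled sentinel, Enable() arms it at zero, and scanners then advance it with TrySetNewValue until the object is fully processed, as VisitFixedArrayWithProgressBar does in the marking visitor above. A hedged sketch of a driver for this API (ScanIncrementally and kChunk are stand-ins, and the ProgressBar class from the new header is assumed to be available):

#include <algorithm>
#include <cstddef>

// Assume the ProgressBar class added above (src/heap/progress-bar.h).

constexpr size_t kChunk = 4096;  // bytes scanned per increment (stand-in)

// Returns true when the object has been fully scanned, false if more
// increments are needed (the caller would then re-queue the object).
bool ScanIncrementally(ProgressBar& bar, size_t object_size) {
  if (!bar.IsEnabled()) bar.Enable();  // arm the bar at 0 on first use
  size_t start = bar.Value();
  size_t end = std::min(object_size, start + kChunk);
  // ... scan bytes [start, end) of the object here ...
  // Publish the new position; a concurrent scanner may race, hence the CAS.
  bool success = bar.TrySetNewValue(start, end);
  return success && end == object_size;
}
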
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 08488eacd0..31e8c92258 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -59,7 +59,7 @@ Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfoForBuiltin(
isolate->factory()->empty_string(), builtin, kind);
- shared->set_internal_formal_parameter_count(len);
+ shared->set_internal_formal_parameter_count(JSParameterCount(len));
shared->set_length(len);
return shared;
}
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 4d3fd9411f..a1992c3e5e 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -45,6 +45,9 @@ namespace internal {
STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
+// static
+constexpr Page::MainThreadFlags Page::kCopyOnFlipFlagsMask;
+
void Page::AllocateFreeListCategories() {
DCHECK_NULL(categories_);
categories_ =
@@ -82,7 +85,7 @@ Page* Page::ConvertNewToOld(Page* old_page) {
DCHECK(old_page->InNewSpace());
OldSpace* old_space = old_page->heap()->old_space();
old_page->set_owner(old_space);
- old_page->SetFlags(0, static_cast<uintptr_t>(~0));
+ old_page->ClearFlags(Page::kAllFlagsMask);
Page* new_page = old_space->InitializePage(old_page);
old_space->AddPage(new_page);
return new_page;
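
The "// static" definition of Page::kCopyOnFlipFlagsMask added in this file is most likely the classic pre-C++17 requirement: a static constexpr data member that is odr-used (for example bound to a reference) needs a namespace-scope definition under C++14, whereas C++17 makes such members implicitly inline. A minimal illustration (Widget is a made-up name):

#include <iostream>

struct Widget {
  static constexpr int kMask = 0x7;  // in-class declaration and initializer
};

// Out-of-line definition: required in C++14 whenever kMask is odr-used
// (e.g. bound to a reference). In C++17 the member is implicitly inline
// and this line becomes redundant.
constexpr int Widget::kMask;

void Print(const int& v) { std::cout << v << "\n"; }  // reference binding

int main() { Print(Widget::kMask); }
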
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 6a047fd375..eb71467f78 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -211,13 +211,11 @@ STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
// Page* p = Page::FromAllocationAreaAddress(address);
class Page : public MemoryChunk {
public:
- static const intptr_t kCopyAllFlags = ~0;
-
// Page flags copied from from-space to to-space when flipping semispaces.
- static const intptr_t kCopyOnFlipFlagsMask =
- static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
- static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
- static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);
+ static constexpr MainThreadFlags kCopyOnFlipFlagsMask =
+ MainThreadFlags(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+ MainThreadFlags(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+ MainThreadFlags(MemoryChunk::INCREMENTAL_MARKING);
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
diff --git a/deps/v8/src/heap/third-party/heap-api.h b/deps/v8/src/heap/third-party/heap-api.h
index 9354c7bca8..2a7409040b 100644
--- a/deps/v8/src/heap/third-party/heap-api.h
+++ b/deps/v8/src/heap/third-party/heap-api.h
@@ -5,11 +5,13 @@
#ifndef V8_HEAP_THIRD_PARTY_HEAP_API_H_
#define V8_HEAP_THIRD_PARTY_HEAP_API_H_
-#include "include/v8.h"
#include "src/base/address-region.h"
#include "src/heap/heap.h"
namespace v8 {
+
+class Isolate;
+
namespace internal {
namespace third_party_heap {
diff --git a/deps/v8/src/ic/OWNERS b/deps/v8/src/ic/OWNERS
index 3c99566e98..369dfdf31b 100644
--- a/deps/v8/src/ic/OWNERS
+++ b/deps/v8/src/ic/OWNERS
@@ -1,5 +1,4 @@
ishell@chromium.org
jkummerow@chromium.org
mvstanton@chromium.org
-mythria@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index 326944e13e..e81b74d440 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -368,7 +368,8 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
// causing a map change.
JSObject::ForceSetPrototype(isolate_, global_proxy,
isolate_->factory()->null_value());
- global_proxy->map().SetConstructor(roots.null_value());
+ global_proxy->map().set_constructor_or_back_pointer(roots.null_value(),
+ kRelaxedStore);
if (FLAG_track_detached_contexts) {
isolate_->AddDetachedContext(env);
}
@@ -551,7 +552,7 @@ V8_NOINLINE Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
fun->shared().set_native(true);
if (adapt) {
- fun->shared().set_internal_formal_parameter_count(len);
+ fun->shared().set_internal_formal_parameter_count(JSParameterCount(len));
} else {
fun->shared().DontAdaptArguments();
}
@@ -1548,9 +1549,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, object_function, "seal",
Builtin::kObjectSeal, 1, false);
- Handle<JSFunction> object_create = SimpleInstallFunction(
- isolate_, object_function, "create", Builtin::kObjectCreate, 2, false);
- native_context()->set_object_create(*object_create);
+ SimpleInstallFunction(isolate_, object_function, "create",
+ Builtin::kObjectCreate, 2, false);
SimpleInstallFunction(isolate_, object_function, "defineProperties",
Builtin::kObjectDefineProperties, 2, true);
@@ -2375,7 +2375,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::PROMISE_FUNCTION_INDEX);
Handle<SharedFunctionInfo> shared(promise_fun->shared(), isolate_);
- shared->set_internal_formal_parameter_count(1);
+ shared->set_internal_formal_parameter_count(JSParameterCount(1));
shared->set_length(1);
InstallSpeciesGetter(isolate_, promise_fun);
@@ -2438,7 +2438,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallWithIntrinsicDefaultProto(isolate_, regexp_fun,
Context::REGEXP_FUNCTION_INDEX);
Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate_);
- shared->set_internal_formal_parameter_count(2);
+ shared->set_internal_formal_parameter_count(JSParameterCount(2));
shared->set_length(2);
{
@@ -2462,7 +2462,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtin::kRegExpPrototypeFlagsGetter, true);
SimpleInstallGetter(isolate_, prototype, factory->global_string(),
Builtin::kRegExpPrototypeGlobalGetter, true);
- SimpleInstallGetter(isolate(), prototype, factory->has_indices_string(),
+ SimpleInstallGetter(isolate(), prototype, factory->hasIndices_string(),
Builtin::kRegExpPrototypeHasIndicesGetter, true);
SimpleInstallGetter(isolate_, prototype, factory->ignoreCase_string(),
Builtin::kRegExpPrototypeIgnoreCaseGetter, true);
@@ -2746,9 +2746,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, math, "cos", Builtin::kMathCos, 1, true);
SimpleInstallFunction(isolate_, math, "cosh", Builtin::kMathCosh, 1, true);
SimpleInstallFunction(isolate_, math, "exp", Builtin::kMathExp, 1, true);
- Handle<JSFunction> math_floor = SimpleInstallFunction(
- isolate_, math, "floor", Builtin::kMathFloor, 1, true);
- native_context()->set_math_floor(*math_floor);
+ SimpleInstallFunction(isolate_, math, "floor", Builtin::kMathFloor, 1,
+ true);
SimpleInstallFunction(isolate_, math, "fround", Builtin::kMathFround, 1,
true);
SimpleInstallFunction(isolate_, math, "hypot", Builtin::kMathHypot, 2,
@@ -2762,9 +2761,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
true);
SimpleInstallFunction(isolate_, math, "max", Builtin::kMathMax, 2, false);
SimpleInstallFunction(isolate_, math, "min", Builtin::kMathMin, 2, false);
- Handle<JSFunction> math_pow = SimpleInstallFunction(
- isolate_, math, "pow", Builtin::kMathPow, 2, true);
- native_context()->set_math_pow(*math_pow);
+ SimpleInstallFunction(isolate_, math, "pow", Builtin::kMathPow, 2, true);
SimpleInstallFunction(isolate_, math, "random", Builtin::kMathRandom, 0,
true);
SimpleInstallFunction(isolate_, math, "round", Builtin::kMathRound, 1,
@@ -3780,7 +3777,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_->proxy_map()->SetConstructor(*proxy_function);
- proxy_function->shared().set_internal_formal_parameter_count(2);
+ proxy_function->shared().set_internal_formal_parameter_count(
+ JSParameterCount(2));
proxy_function->shared().set_length(2);
native_context()->set_proxy_function(*proxy_function);
@@ -4129,10 +4127,9 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
Handle<String> script_name =
factory->NewStringFromUtf8(name).ToHandleChecked();
MaybeHandle<SharedFunctionInfo> maybe_function_info =
- Compiler::GetSharedFunctionInfoForScript(
- isolate, source, ScriptDetails(script_name), extension, nullptr,
- ScriptCompiler::kNoCompileOptions,
- ScriptCompiler::kNoCacheBecauseV8Extension, EXTENSION_CODE);
+ Compiler::GetSharedFunctionInfoForScriptWithExtension(
+ isolate, source, ScriptDetails(script_name), extension,
+ ScriptCompiler::kNoCompileOptions, EXTENSION_CODE);
if (!maybe_function_info.ToHandle(&function_info)) return false;
cache->Add(isolate, name, function_info);
}
@@ -4594,6 +4591,20 @@ void Genesis::InitializeGlobal_harmony_intl_locale_info() {
Builtin::kLocalePrototypeWeekInfo, true);
}
+void Genesis::InitializeGlobal_harmony_intl_enumeration() {
+ if (!FLAG_harmony_intl_enumeration) return;
+
+ Handle<JSObject> intl = Handle<JSObject>::cast(
+ JSReceiver::GetProperty(
+ isolate(),
+ Handle<JSReceiver>(native_context()->global_object(), isolate()),
+ factory()->InternalizeUtf8String("Intl"))
+ .ToHandleChecked());
+
+ SimpleInstallFunction(isolate(), intl, "supportedValuesOf",
+ Builtin::kIntlSupportedValuesOf, 0, false);
+}
+
#endif // V8_INTL_SUPPORT
Handle<JSFunction> Genesis::CreateArrayBuffer(
diff --git a/deps/v8/src/init/bootstrapper.h b/deps/v8/src/init/bootstrapper.h
index 19f028048e..b92e755c93 100644
--- a/deps/v8/src/init/bootstrapper.h
+++ b/deps/v8/src/init/bootstrapper.h
@@ -5,6 +5,9 @@
#ifndef V8_INIT_BOOTSTRAPPER_H_
#define V8_INIT_BOOTSTRAPPER_H_
+#include "include/v8-context.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-snapshot.h"
#include "src/heap/factory.h"
#include "src/objects/fixed-array.h"
#include "src/objects/shared-function-info.h"
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index d4737bf331..f30192526e 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -198,13 +198,14 @@
V(_, dot_string, ".") \
V(_, dot_switch_tag_string, ".switch_tag") \
V(_, dotAll_string, "dotAll") \
- V(_, enumerable_string, "enumerable") \
- V(_, element_string, "element") \
V(_, Error_string, "Error") \
- V(_, errors_string, "errors") \
+ V(_, EvalError_string, "EvalError") \
+ V(_, element_string, "element") \
+ V(_, enumerable_string, "enumerable") \
V(_, error_to_string, "[object Error]") \
+ V(_, errors_string, "errors") \
V(_, eval_string, "eval") \
- V(_, EvalError_string, "EvalError") \
+ V(_, exception_string, "exception") \
V(_, exec_string, "exec") \
V(_, false_string, "false") \
V(_, FinalizationRegistry_string, "FinalizationRegistry") \
@@ -226,7 +227,7 @@
V(_, groups_string, "groups") \
V(_, growable_string, "growable") \
V(_, has_string, "has") \
- V(_, has_indices_string, "hasIndices") \
+ V(_, hasIndices_string, "hasIndices") \
V(_, ignoreCase_string, "ignoreCase") \
V(_, illegal_access_string, "illegal access") \
V(_, illegal_argument_string, "illegal argument") \
diff --git a/deps/v8/src/init/isolate-allocator.cc b/deps/v8/src/init/isolate-allocator.cc
index a479f1ab94..34a24a348f 100644
--- a/deps/v8/src/init/isolate-allocator.cc
+++ b/deps/v8/src/init/isolate-allocator.cc
@@ -8,6 +8,7 @@
#include "src/common/ptr-compr.h"
#include "src/execution/isolate.h"
#include "src/heap/code-range.h"
+#include "src/init/vm-cage.h"
#include "src/utils/memcopy.h"
#include "src/utils/utils.h"
@@ -74,7 +75,28 @@ void IsolateAllocator::FreeProcessWidePtrComprCageForTesting() {
void IsolateAllocator::InitializeOncePerProcess() {
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
PtrComprCageReservationParams params;
- if (!GetProcessWidePtrComprCage()->InitReservation(params)) {
+ base::AddressRegion existing_reservation;
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ // TODO(chromium:1218005) avoid the name collision with
+ // v8::internal::VirtualMemoryCage and ideally figure out a clear naming
+ // scheme for the different types of virtual memory cages.
+
+ // For now, we allow the virtual memory cage to be disabled even when
+ // compiling with v8_enable_virtual_memory_cage. This fallback will be
+ // disallowed in the future, at the latest once ArrayBuffers are referenced
+ // through an offset rather than a raw pointer.
+ if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
+ CHECK(kAllowBackingStoresOutsideDataCage);
+ } else {
+ auto cage = GetProcessWideVirtualMemoryCage();
+ CHECK(cage->is_initialized());
+ DCHECK_EQ(params.reservation_size, cage->pointer_cage_size());
+ existing_reservation = base::AddressRegion(cage->pointer_cage_base(),
+ cage->pointer_cage_size());
+ }
+#endif
+ if (!GetProcessWidePtrComprCage()->InitReservation(params,
+ existing_reservation)) {
V8::FatalProcessOutOfMemory(
nullptr,
"Failed to reserve virtual memory for process-wide V8 "
diff --git a/deps/v8/src/init/startup-data-util.cc b/deps/v8/src/init/startup-data-util.cc
index d480e3dcc2..ba3a123651 100644
--- a/deps/v8/src/init/startup-data-util.cc
+++ b/deps/v8/src/init/startup-data-util.cc
@@ -7,6 +7,8 @@
#include <stdlib.h>
#include <string.h>
+#include "include/v8-initialization.h"
+#include "include/v8-snapshot.h"
#include "src/base/file-utils.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
@@ -76,11 +78,6 @@ void LoadFromFile(const char* snapshot_blob) {
void InitializeExternalStartupData(const char* directory_path) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
const char* snapshot_name = "snapshot_blob.bin";
-#ifdef V8_MULTI_SNAPSHOTS
- if (!FLAG_untrusted_code_mitigations) {
- snapshot_name = "snapshot_blob_trusted.bin";
- }
-#endif
std::unique_ptr<char[]> snapshot =
base::RelativePath(directory_path, snapshot_name);
LoadFromFile(snapshot.get());
diff --git a/deps/v8/src/init/startup-data-util.h b/deps/v8/src/init/startup-data-util.h
index 5d49b0b1a1..90751e558e 100644
--- a/deps/v8/src/init/startup-data-util.h
+++ b/deps/v8/src/init/startup-data-util.h
@@ -5,8 +5,6 @@
#ifndef V8_INIT_STARTUP_DATA_UTIL_H_
#define V8_INIT_STARTUP_DATA_UTIL_H_
-#include "include/v8.h"
-
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/init/v8.cc b/deps/v8/src/init/v8.cc
index 70367d0697..7258ba8d93 100644
--- a/deps/v8/src/init/v8.cc
+++ b/deps/v8/src/init/v8.cc
@@ -20,6 +20,7 @@
#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
#include "src/init/bootstrapper.h"
+#include "src/init/vm-cage.h"
#include "src/libsampler/sampler.h"
#include "src/objects/elements.h"
#include "src/objects/objects-inl.h"
@@ -73,6 +74,17 @@ void V8::TearDown() {
}
void V8::InitializeOncePerProcessImpl() {
+ CHECK(platform_);
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!GetProcessWideVirtualMemoryCage()->is_initialized()) {
+ // For now, we still allow the cage to be disabled even if V8 was compiled
+ // with V8_VIRTUAL_MEMORY_CAGE. This will eventually be forbidden.
+ CHECK(kAllowBackingStoresOutsideDataCage);
+ GetProcessWideVirtualMemoryCage()->Disable();
+ }
+#endif
+
// Update logging information before enforcing flag implications.
bool* log_all_flags[] = {&FLAG_turbo_profiling_log_builtins,
&FLAG_log_all,
@@ -207,6 +219,15 @@ void V8::InitializePlatform(v8::Platform* platform) {
#endif
}
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+bool V8::InitializeVirtualMemoryCage() {
+ // Platform must have been initialized already.
+ CHECK(platform_);
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+ return GetProcessWideVirtualMemoryCage()->Initialize(page_allocator);
+}
+#endif
+
void V8::ShutdownPlatform() {
CHECK(platform_);
#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
@@ -216,6 +237,13 @@ void V8::ShutdownPlatform() {
#endif
v8::tracing::TracingCategoryObserver::TearDown();
v8::base::SetPrintStackTrace(nullptr);
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ // TODO(chromium:1218005) alternatively, this could move to its own
+ // public TearDownVirtualMemoryCage function.
+ GetProcessWideVirtualMemoryCage()->TearDown();
+#endif
+
platform_ = nullptr;
}
diff --git a/deps/v8/src/init/v8.h b/deps/v8/src/init/v8.h
index a8cd6832cd..bbde9bfd13 100644
--- a/deps/v8/src/init/v8.h
+++ b/deps/v8/src/init/v8.h
@@ -29,6 +29,10 @@ class V8 : public AllStatic {
const char* location,
bool is_heap_oom = false);
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ static bool InitializeVirtualMemoryCage();
+#endif
+
static void InitializePlatform(v8::Platform* platform);
static void ShutdownPlatform();
V8_EXPORT_PRIVATE static v8::Platform* GetCurrentPlatform();
diff --git a/deps/v8/src/init/vm-cage.cc b/deps/v8/src/init/vm-cage.cc
new file mode 100644
index 0000000000..9d88e4085b
--- /dev/null
+++ b/deps/v8/src/init/vm-cage.cc
@@ -0,0 +1,81 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/init/vm-cage.h"
+
+#include "include/v8-internal.h"
+#include "src/base/bounded-page-allocator.h"
+#include "src/base/lazy-instance.h"
+#include "src/utils/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+
+bool V8VirtualMemoryCage::Initialize(PageAllocator* page_allocator) {
+ constexpr bool use_guard_regions = true;
+ return Initialize(page_allocator, kVirtualMemoryCageSize, use_guard_regions);
+}
+
+bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
+ size_t size, bool use_guard_regions) {
+ CHECK(!initialized_);
+ CHECK(!disabled_);
+ CHECK_GE(size, kVirtualMemoryCageMinimumSize);
+
+ size_t reservation_size = size;
+ if (use_guard_regions) {
+ reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
+ }
+
+ base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
+ nullptr, reservation_size, kVirtualMemoryCageAlignment,
+ PageAllocator::kNoAccess));
+ if (!base_) return false;
+
+ if (use_guard_regions) {
+ base_ += kVirtualMemoryCageGuardRegionSize;
+ has_guard_regions_ = true;
+ }
+
+ page_allocator_ = page_allocator;
+ size_ = size;
+
+ data_cage_page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
+ page_allocator_, data_cage_base(), data_cage_size(),
+ page_allocator_->AllocatePageSize());
+
+ initialized_ = true;
+
+ return true;
+}
+
+void V8VirtualMemoryCage::TearDown() {
+ if (initialized_) {
+ data_cage_page_allocator_.reset();
+ Address reservation_base = base_;
+ size_t reservation_size = size_;
+ if (has_guard_regions_) {
+ reservation_base -= kVirtualMemoryCageGuardRegionSize;
+ reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
+ }
+ CHECK(page_allocator_->FreePages(reinterpret_cast<void*>(reservation_base),
+ reservation_size));
+ page_allocator_ = nullptr;
+ base_ = kNullAddress;
+ size_ = 0;
+ initialized_ = false;
+ has_guard_regions_ = false;
+ }
+ disabled_ = false;
+}
+
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(V8VirtualMemoryCage,
+ GetProcessWideVirtualMemoryCage)
+
+#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/init/vm-cage.h b/deps/v8/src/init/vm-cage.h
new file mode 100644
index 0000000000..5fdd2ad6e0
--- /dev/null
+++ b/deps/v8/src/init/vm-cage.h
@@ -0,0 +1,130 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INIT_VM_CAGE_H_
+#define V8_INIT_VM_CAGE_H_
+
+#include "include/v8-internal.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+
+class PageAllocator;
+
+namespace internal {
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+
+/**
+ * V8 Virtual Memory Cage.
+ *
+ * When the virtual memory cage is enabled, v8 will place most of its objects
+ * inside a dedicated region of virtual address space. In particular, all v8
+ * heaps, inside which objects reference themselves using compressed (32-bit)
+ * pointers, are located at the start of the virtual memory cage (the "pointer
+ * cage") and pure memory buffers like ArrayBuffer backing stores, which
+ * themselves do not contain any pointers, are located in the remaining part of
+ * the cage (the "data cage"). These buffers will eventually be referenced from
+ * inside the v8 heap using offsets rather than pointers. It should then be
+ * assumed that an attacker is able to corrupt data arbitrarily and concurrently
+ * inside the virtual memory cage.
+ *
+ * As the embedder is responsible for providing ArrayBuffer allocators, v8
+ * exposes a page allocator for the data cage to the embedder.
+ *
+ * TODO(chromium:1218005) Maybe don't call the sub-regions "cages" as well to
+ * avoid confusion? In any case, the names should probably be identical to the
+ * internal names for these virtual memory regions (where they are currently
+ * called cages).
+ * TODO(chromium:1218005) come up with a coherent naming scheme for this class
+ * and the other "cages" in v8.
+ */
+class V8VirtualMemoryCage {
+ public:
+ // +- ~~~ -+----------------+----------------------- ~~~ -+- ~~~ -+
+ // | 32 GB | 4 GB | | 32 GB |
+ // +- ~~~ -+----------------+----------------------- ~~~ -+- ~~~ -+
+ // ^ ^ ^ ^
+ // Guard Pointer Cage Data Cage Guard
+ // Region (contains all (contains all ArrayBuffer and Region
+ // (front) V8 heaps) WASM memory backing stores) (back)
+ //
+ // | base ---------------- size ------------------> |
+
+ V8VirtualMemoryCage() = default;
+
+ V8VirtualMemoryCage(const V8VirtualMemoryCage&) = delete;
+ V8VirtualMemoryCage& operator=(V8VirtualMemoryCage&) = delete;
+
+ bool is_initialized() const { return initialized_; }
+ bool is_disabled() const { return disabled_; }
+ bool is_enabled() const { return !disabled_; }
+
+ bool Initialize(v8::PageAllocator* page_allocator);
+ void Disable() {
+ CHECK(!initialized_);
+ disabled_ = true;
+ }
+
+ void TearDown();
+
+ Address base() const { return base_; }
+ size_t size() const { return size_; }
+
+ Address pointer_cage_base() const { return base_; }
+ size_t pointer_cage_size() const { return kVirtualMemoryCagePointerCageSize; }
+
+ Address data_cage_base() const {
+ return pointer_cage_base() + pointer_cage_size();
+ }
+ size_t data_cage_size() const { return size_ - pointer_cage_size(); }
+
+ bool Contains(Address addr) const {
+ return addr >= base_ && addr < base_ + size_;
+ }
+
+ bool Contains(void* ptr) const {
+ return Contains(reinterpret_cast<Address>(ptr));
+ }
+
+ v8::PageAllocator* GetDataCagePageAllocator() {
+ return data_cage_page_allocator_.get();
+ }
+
+ private:
+ friend class SequentialUnmapperTest;
+
+ // We allow tests to disable the guard regions around the cage. This is useful
+ // for example for tests like the SequentialUnmapperTest which track page
+ // allocations and so would incur a large overhead from the guard regions.
+ bool Initialize(v8::PageAllocator* page_allocator, size_t total_size,
+ bool use_guard_regions);
+
+ Address base_ = kNullAddress;
+ size_t size_ = 0;
+ bool has_guard_regions_ = false;
+ bool initialized_ = false;
+ bool disabled_ = false;
+ v8::PageAllocator* page_allocator_ = nullptr;
+ std::unique_ptr<v8::PageAllocator> data_cage_page_allocator_;
+};
+
+V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
+
+#endif // V8_VIRTUAL_MEMORY_CAGE
+
+V8_INLINE bool IsValidBackingStorePointer(void* ptr) {
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ Address addr = reinterpret_cast<Address>(ptr);
+ return kAllowBackingStoresOutsideDataCage || addr == kNullAddress ||
+ GetProcessWideVirtualMemoryCage()->Contains(addr);
+#else
+ return true;
+#endif
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INIT_VM_CAGE_H_
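
vm-cage.h above fixes the layout as guard region, pointer cage (all compressed-pointer heaps), data cage (ArrayBuffer and Wasm backing stores), guard region. A hedged sketch of how the accessors expose that partition and how a backing-store pointer would be validated (PrintLayout and the probe allocation are illustrative, not V8 code):

#ifdef V8_VIRTUAL_MEMORY_CAGE
#include <cstdio>

#include "include/v8-platform.h"
#include "src/init/vm-cage.h"

namespace i = v8::internal;

void PrintLayout() {
  i::V8VirtualMemoryCage* cage = i::GetProcessWideVirtualMemoryCage();
  if (!cage->is_initialized()) return;

  // The reservation is split as [pointer cage][data cage]; the guard regions
  // sit outside [base(), base() + size()).
  std::printf("pointer cage: %p + %zu bytes\n",
              reinterpret_cast<void*>(cage->pointer_cage_base()),
              cage->pointer_cage_size());
  std::printf("data cage:    %p + %zu bytes\n",
              reinterpret_cast<void*>(cage->data_cage_base()),
              cage->data_cage_size());

  // Backing stores are expected to come from the data cage's bounded page
  // allocator and therefore to satisfy IsValidBackingStorePointer.
  v8::PageAllocator* data_alloc = cage->GetDataCagePageAllocator();
  size_t page = data_alloc->AllocatePageSize();
  void* probe = data_alloc->AllocatePages(nullptr, page, page,
                                          v8::PageAllocator::kReadWrite);
  if (probe) {
    std::printf("probe inside cage: %s\n",
                i::IsValidBackingStorePointer(probe) ? "yes" : "no");
    data_alloc->FreePages(probe, page);
  }
}
#endif  // V8_VIRTUAL_MEMORY_CAGE
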
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index 1c3ef43314..08b97ea3e9 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -13,7 +13,6 @@ include_rules = [
"+src/base/safe_conversions.h",
"+src/base/template-utils.h",
"+src/base/v8-fallthrough.h",
- "+src/logging/tracing-flags.h",
"+src/numbers/conversions.h",
"+src/inspector",
"+src/tracing",
diff --git a/deps/v8/src/inspector/custom-preview.cc b/deps/v8/src/inspector/custom-preview.cc
index d8e88861cb..97b0a07210 100644
--- a/deps/v8/src/inspector/custom-preview.cc
+++ b/deps/v8/src/inspector/custom-preview.cc
@@ -5,6 +5,11 @@
#include "src/inspector/custom-preview.h"
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-json.h"
+#include "include/v8-microtask-queue.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index fc029e937a..e927c1cc40 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -34,7 +34,11 @@
#include <unordered_set>
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/custom-preview.h"
#include "src/inspector/inspected-context.h"
@@ -354,8 +358,8 @@ class PropertyAccumulator : public ValueMirror::PropertyAccumulator {
Response InjectedScript::getProperties(
v8::Local<v8::Object> object, const String16& groupName, bool ownProperties,
- bool accessorPropertiesOnly, WrapMode wrapMode,
- std::unique_ptr<Array<PropertyDescriptor>>* properties,
+ bool accessorPropertiesOnly, bool nonIndexedPropertiesOnly,
+ WrapMode wrapMode, std::unique_ptr<Array<PropertyDescriptor>>* properties,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
v8::HandleScope handles(m_context->isolate());
v8::Local<v8::Context> context = m_context->context();
@@ -367,7 +371,8 @@ Response InjectedScript::getProperties(
std::vector<PropertyMirror> mirrors;
PropertyAccumulator accumulator(&mirrors);
if (!ValueMirror::getProperties(context, object, ownProperties,
- accessorPropertiesOnly, &accumulator)) {
+ accessorPropertiesOnly,
+ nonIndexedPropertiesOnly, &accumulator)) {
return createExceptionDetails(tryCatch, groupName, exceptionDetails);
}
for (const PropertyMirror& mirror : mirrors) {
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
index 9971d7da3a..86bcf60b17 100644
--- a/deps/v8/src/inspector/injected-script.h
+++ b/deps/v8/src/inspector/injected-script.h
@@ -35,6 +35,9 @@
#include <unordered_map>
#include <unordered_set>
+#include "include/v8-exception.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-persistent-handle.h"
#include "src/base/macros.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Forward.h"
@@ -42,8 +45,6 @@
#include "src/inspector/v8-console.h"
#include "src/inspector/v8-debugger.h"
-#include "include/v8.h"
-
namespace v8_inspector {
class RemoteObjectId;
@@ -76,7 +77,8 @@ class InjectedScript final {
Response getProperties(
v8::Local<v8::Object>, const String16& groupName, bool ownProperties,
- bool accessorPropertiesOnly, WrapMode wrapMode,
+ bool accessorPropertiesOnly, bool nonIndexedPropertiesOnly,
+ WrapMode wrapMode,
std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
result,
Maybe<protocol::Runtime::ExceptionDetails>*);
diff --git a/deps/v8/src/inspector/inspected-context.cc b/deps/v8/src/inspector/inspected-context.cc
index a47df1ef12..6786f06b2f 100644
--- a/deps/v8/src/inspector/inspected-context.cc
+++ b/deps/v8/src/inspector/inspected-context.cc
@@ -4,14 +4,14 @@
#include "src/inspector/inspected-context.h"
+#include "include/v8-context.h"
+#include "include/v8-inspector.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-console.h"
#include "src/inspector/v8-inspector-impl.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
class InspectedContext::WeakCallbackData {
diff --git a/deps/v8/src/inspector/inspected-context.h b/deps/v8/src/inspector/inspected-context.h
index d3f0fe012b..f8811d0469 100644
--- a/deps/v8/src/inspector/inspected-context.h
+++ b/deps/v8/src/inspector/inspected-context.h
@@ -9,12 +9,18 @@
#include <unordered_map>
#include <unordered_set>
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-persistent-handle.h"
#include "src/base/macros.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/string-16.h"
#include "src/inspector/v8-debugger-id.h"
+namespace v8 {
+class Context;
+class Object;
+} // namespace v8
+
namespace v8_inspector {
class InjectedScript;
diff --git a/deps/v8/src/inspector/test-interface.h b/deps/v8/src/inspector/test-interface.h
index cf16c6936e..406ba02fa9 100644
--- a/deps/v8/src/inspector/test-interface.h
+++ b/deps/v8/src/inspector/test-interface.h
@@ -5,7 +5,7 @@
#ifndef V8_INSPECTOR_TEST_INTERFACE_H_
#define V8_INSPECTOR_TEST_INTERFACE_H_
-#include "include/v8.h"
+#include "include/v8config.h"
namespace v8_inspector {
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 78622aa8d3..2734c67876 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -4,7 +4,11 @@
#include "src/inspector/v8-console-message.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-primitive-object.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
diff --git a/deps/v8/src/inspector/v8-console-message.h b/deps/v8/src/inspector/v8-console-message.h
index 4dc521ee1c..cd960cf797 100644
--- a/deps/v8/src/inspector/v8-console-message.h
+++ b/deps/v8/src/inspector/v8-console-message.h
@@ -10,7 +10,8 @@
#include <memory>
#include <set>
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-persistent-handle.h"
#include "src/inspector/protocol/Console.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 93a73f2580..55b620b0fc 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -4,6 +4,11 @@
#include "src/inspector/v8-console.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
#include "src/base/macros.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
@@ -17,8 +22,6 @@
#include "src/inspector/v8-stack-trace-impl.h"
#include "src/inspector/v8-value-utils.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
namespace {
diff --git a/deps/v8/src/inspector/v8-console.h b/deps/v8/src/inspector/v8-console.h
index 59d7a8152f..cd10f11a8a 100644
--- a/deps/v8/src/inspector/v8-console.h
+++ b/deps/v8/src/inspector/v8-console.h
@@ -5,11 +5,16 @@
#ifndef V8_INSPECTOR_V8_CONSOLE_H_
#define V8_INSPECTOR_V8_CONSOLE_H_
+#include "include/v8-array-buffer.h"
+#include "include/v8-external.h"
+#include "include/v8-local-handle.h"
#include "src/base/macros.h"
-
-#include "include/v8.h"
#include "src/debug/interface-types.h"
+namespace v8 {
+class Set;
+} // namespace v8
+
namespace v8_inspector {
class InspectedContext;
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index c49903f8c3..c19e2b72af 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -7,7 +7,10 @@
#include <algorithm>
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
#include "src/base/safe_conversions.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index a8fd6775b0..d4486eb85e 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -32,12 +32,16 @@
#include <memory>
+#include "include/v8-local-handle.h"
+#include "include/v8-maybe.h"
#include "src/base/macros.h"
+#include "src/debug/debug-interface.h"
#include "src/inspector/string-16.h"
#include "src/inspector/string-util.h"
-#include "include/v8.h"
-#include "src/debug/debug-interface.h"
+namespace v8 {
+class Isolate;
+}
namespace v8_inspector {
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 0ac934a4d3..1216dc78de 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -4,6 +4,11 @@
#include "src/inspector/v8-debugger.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-util.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/string-util.h"
@@ -14,8 +19,6 @@
#include "src/inspector/v8-stack-trace-impl.h"
#include "src/inspector/v8-value-utils.h"
-#include "include/v8-util.h"
-
namespace v8_inspector {
namespace {
@@ -535,10 +538,6 @@ size_t HeapLimitForDebugging(size_t initial_heap_limit) {
size_t V8Debugger::nearHeapLimitCallback(void* data, size_t current_heap_limit,
size_t initial_heap_limit) {
V8Debugger* thisPtr = static_cast<V8Debugger*>(data);
-// TODO(solanes, v8:10876): Remove when bug is solved.
-#if DEBUG
- printf("nearHeapLimitCallback\n");
-#endif
thisPtr->m_originalHeapLimit = current_heap_limit;
thisPtr->m_scheduledOOMBreak = true;
v8::Local<v8::Context> context =
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index ed6901292c..955d7bcf76 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -4,6 +4,7 @@
#include "src/inspector/v8-heap-profiler-agent-impl.h"
+#include "include/v8-context.h"
#include "include/v8-inspector.h"
#include "include/v8-platform.h"
#include "include/v8-profiler.h"
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
index feda75ffb7..cd92bd32d0 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
@@ -11,7 +11,9 @@
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/HeapProfiler.h"
-#include "include/v8.h"
+namespace v8 {
+class Isolate;
+}
namespace v8_inspector {
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index f0cfa9b2c7..2da495c470 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -32,6 +32,9 @@
#include <vector>
+#include "include/v8-context.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-microtask-queue.h"
#include "include/v8-platform.h"
#include "src/base/platform/mutex.h"
#include "src/debug/debug-interface.h"
@@ -333,39 +336,6 @@ void V8InspectorImpl::allAsyncTasksCanceled() {
m_debugger->allAsyncTasksCanceled();
}
-V8Inspector::Counters::Counters(v8::Isolate* isolate) : m_isolate(isolate) {
- CHECK(m_isolate);
- auto* inspector =
- static_cast<V8InspectorImpl*>(v8::debug::GetInspector(m_isolate));
- CHECK(inspector);
- CHECK(!inspector->m_counters);
- inspector->m_counters = this;
- m_isolate->SetCounterFunction(&Counters::getCounterPtr);
-}
-
-V8Inspector::Counters::~Counters() {
- auto* inspector =
- static_cast<V8InspectorImpl*>(v8::debug::GetInspector(m_isolate));
- CHECK(inspector);
- inspector->m_counters = nullptr;
- m_isolate->SetCounterFunction(nullptr);
-}
-
-int* V8Inspector::Counters::getCounterPtr(const char* name) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
- DCHECK(isolate);
- V8Inspector* inspector = v8::debug::GetInspector(isolate);
- DCHECK(inspector);
- auto* instance = static_cast<V8InspectorImpl*>(inspector)->m_counters;
- DCHECK(instance);
- return &(instance->m_countersMap[name]);
-}
-
-std::shared_ptr<V8Inspector::Counters> V8InspectorImpl::enableCounters() {
- if (m_counters) return m_counters->shared_from_this();
- return std::make_shared<Counters>(m_isolate);
-}
-
v8::MaybeLocal<v8::Context> V8InspectorImpl::regexContext() {
if (m_regexContext.IsEmpty()) {
m_regexContext.Reset(m_isolate, v8::Context::New(m_isolate));
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index e1607f88c0..5c797bbfc7 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -110,8 +110,6 @@ class V8InspectorImpl : public V8Inspector {
void externalAsyncTaskStarted(const V8StackTraceId& parent) override;
void externalAsyncTaskFinished(const V8StackTraceId& parent) override;
- std::shared_ptr<Counters> enableCounters() override;
-
bool associateExceptionData(v8::Local<v8::Context>,
v8::Local<v8::Value> exception,
v8::Local<v8::Name> key,
@@ -157,8 +155,6 @@ class V8InspectorImpl : public V8Inspector {
};
private:
- friend class Counters;
-
v8::Isolate* m_isolate;
V8InspectorClient* m_client;
std::unique_ptr<V8Debugger> m_debugger;
@@ -191,8 +187,6 @@ class V8InspectorImpl : public V8Inspector {
std::map<std::pair<int64_t, int64_t>, int> m_uniqueIdToContextId;
std::unique_ptr<V8Console> m_console;
-
- Counters* m_counters = nullptr;
};
} // namespace v8_inspector
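The removed Counters helper (and its protocol plumbing in the profiler agent below) was a thin wrapper around the public counter-lookup hook on the isolate. A minimal embedder-side sketch of that hook, assuming the standard v8::Isolate::SetCounterFunction API; the map and helper names are invented:

#include <map>
#include <string>

#include "include/v8-isolate.h"

static std::map<std::string, int> g_counters;

int* LookupCounter(const char* name) {
  // V8 caches the returned pointer and bumps the int in place.
  return &g_counters[name];
}

void InstallCounterFunction(v8::Isolate* isolate) {
  isolate->SetCounterFunction(&LookupCounter);
}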
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index b2c04842cc..6b44459082 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -16,7 +16,6 @@
#include "src/inspector/v8-inspector-impl.h"
#include "src/inspector/v8-inspector-session-impl.h"
#include "src/inspector/v8-stack-trace-impl.h"
-#include "src/logging/tracing-flags.h"
namespace v8_inspector {
@@ -30,8 +29,6 @@ static const char preciseCoverageDetailed[] = "preciseCoverageDetailed";
static const char preciseCoverageAllowTriggeredUpdates[] =
"preciseCoverageAllowTriggeredUpdates";
static const char typeProfileStarted[] = "typeProfileStarted";
-static const char countersEnabled[] = "countersEnabled";
-static const char runtimeCallStatsEnabled[] = "runtimeCallStatsEnabled";
} // namespace ProfilerAgentState
namespace {
@@ -243,16 +240,6 @@ Response V8ProfilerAgentImpl::disable() {
m_state->setBoolean(ProfilerAgentState::profilerEnabled, false);
}
- if (m_counters) {
- disableCounters();
- m_state->setBoolean(ProfilerAgentState::countersEnabled, false);
- }
-
- if (m_runtime_call_stats_enabled) {
- disableRuntimeCallStats();
- m_state->setBoolean(ProfilerAgentState::runtimeCallStatsEnabled, false);
- }
-
return Response::Success();
}
@@ -287,15 +274,6 @@ void V8ProfilerAgentImpl::restore() {
Maybe<bool>(updatesAllowed), &timestamp);
}
}
-
- if (m_state->booleanProperty(ProfilerAgentState::countersEnabled, false)) {
- enableCounters();
- }
-
- if (m_state->booleanProperty(ProfilerAgentState::runtimeCallStatsEnabled,
- false)) {
- enableRuntimeCallStats();
- }
}
Response V8ProfilerAgentImpl::start() {
@@ -551,104 +529,6 @@ Response V8ProfilerAgentImpl::takeTypeProfile(
return Response::Success();
}
-Response V8ProfilerAgentImpl::enableCounters() {
- if (m_counters)
- return Response::ServerError("Counters collection already enabled.");
-
- if (V8Inspector* inspector = v8::debug::GetInspector(m_isolate))
- m_counters = inspector->enableCounters();
- else
- return Response::ServerError("No inspector found.");
-
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::disableCounters() {
- if (m_counters) m_counters.reset();
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::getCounters(
- std::unique_ptr<protocol::Array<protocol::Profiler::CounterInfo>>*
- out_result) {
- if (!m_counters)
- return Response::ServerError("Counters collection is not enabled.");
-
- *out_result =
- std::make_unique<protocol::Array<protocol::Profiler::CounterInfo>>();
-
- for (const auto& counter : m_counters->getCountersMap()) {
- (*out_result)
- ->emplace_back(
- protocol::Profiler::CounterInfo::create()
- .setName(String16(counter.first.data(), counter.first.length()))
- .setValue(counter.second)
- .build());
- }
-
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::enableRuntimeCallStats() {
- if (v8::internal::TracingFlags::runtime_stats.load()) {
- return Response::ServerError(
- "Runtime Call Stats collection is already enabled.");
- }
-
- v8::internal::TracingFlags::runtime_stats.store(true);
- m_runtime_call_stats_enabled = true;
-
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::disableRuntimeCallStats() {
- if (!v8::internal::TracingFlags::runtime_stats.load()) {
- return Response::ServerError(
- "Runtime Call Stats collection is not enabled.");
- }
-
- if (!m_runtime_call_stats_enabled) {
- return Response::ServerError(
- "Runtime Call Stats collection was not enabled by this session.");
- }
-
- v8::internal::TracingFlags::runtime_stats.store(false);
- m_runtime_call_stats_enabled = false;
-
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::getRuntimeCallStats(
- std::unique_ptr<
- protocol::Array<protocol::Profiler::RuntimeCallCounterInfo>>*
- out_result) {
- if (!m_runtime_call_stats_enabled) {
- return Response::ServerError(
- "Runtime Call Stats collection is not enabled.");
- }
-
- if (!v8::internal::TracingFlags::runtime_stats.load()) {
- return Response::ServerError(
- "Runtime Call Stats collection was disabled outside of this session.");
- }
-
- *out_result = std::make_unique<
- protocol::Array<protocol::Profiler::RuntimeCallCounterInfo>>();
-
- v8::debug::EnumerateRuntimeCallCounters(
- m_isolate,
- [&](const char* name, int64_t count, v8::base::TimeDelta time) {
- (*out_result)
- ->emplace_back(protocol::Profiler::RuntimeCallCounterInfo::create()
- .setName(String16(name))
- .setValue(static_cast<double>(count))
- .setTime(time.InSecondsF())
- .build());
- });
-
- return Response::Success();
-}
-
String16 V8ProfilerAgentImpl::nextProfileId() {
return String16::fromInteger(
v8::base::Relaxed_AtomicIncrement(&s_lastProfileId, 1));
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.h b/deps/v8/src/inspector/v8-profiler-agent-impl.h
index 7cafa0cb01..4fba6e6c70 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.h
@@ -59,19 +59,6 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptTypeProfile>>*
out_result) override;
- Response enableCounters() override;
- Response disableCounters() override;
- Response getCounters(
- std::unique_ptr<protocol::Array<protocol::Profiler::CounterInfo>>*
- out_result) override;
-
- Response enableRuntimeCallStats() override;
- Response disableRuntimeCallStats() override;
- Response getRuntimeCallStats(
- std::unique_ptr<
- protocol::Array<protocol::Profiler::RuntimeCallCounterInfo>>*
- out_result) override;
-
void consoleProfile(const String16& title);
void consoleProfileEnd(const String16& title);
@@ -95,8 +82,6 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
std::vector<ProfileDescriptor> m_startedProfiles;
String16 m_frontendInitiatedProfileId;
int m_startedProfilesCount = 0;
- std::shared_ptr<V8Inspector::Counters> m_counters;
- bool m_runtime_call_stats_enabled = false;
};
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-regex.cc b/deps/v8/src/inspector/v8-regex.cc
index 55b00d50ae..fd44a6a258 100644
--- a/deps/v8/src/inspector/v8-regex.cc
+++ b/deps/v8/src/inspector/v8-regex.cc
@@ -6,11 +6,15 @@
#include <limits.h>
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-regexp.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-inspector-impl.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
V8Regex::V8Regex(V8InspectorImpl* inspector, const String16& pattern,
diff --git a/deps/v8/src/inspector/v8-regex.h b/deps/v8/src/inspector/v8-regex.h
index 9ce31cf4ce..75d972f15a 100644
--- a/deps/v8/src/inspector/v8-regex.h
+++ b/deps/v8/src/inspector/v8-regex.h
@@ -5,10 +5,13 @@
#ifndef V8_INSPECTOR_V8_REGEX_H_
#define V8_INSPECTOR_V8_REGEX_H_
+#include "include/v8-persistent-handle.h"
#include "src/base/macros.h"
#include "src/inspector/string-16.h"
-#include "include/v8.h"
+namespace v8 {
+class RegExp;
+}
namespace v8_inspector {
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index b78b641edf..3a8277639c 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -33,6 +33,11 @@
#include <inttypes.h>
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
@@ -47,8 +52,6 @@
#include "src/inspector/v8-value-utils.h"
#include "src/tracing/trace-event.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
namespace V8RuntimeAgentImplState {
@@ -418,6 +421,7 @@ void V8RuntimeAgentImpl::callFunctionOn(
Response V8RuntimeAgentImpl::getProperties(
const String16& objectId, Maybe<bool> ownProperties,
Maybe<bool> accessorPropertiesOnly, Maybe<bool> generatePreview,
+ Maybe<bool> nonIndexedPropertiesOnly,
std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
result,
Maybe<protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
@@ -442,6 +446,7 @@ Response V8RuntimeAgentImpl::getProperties(
response = scope.injectedScript()->getProperties(
object, scope.objectGroupName(), ownProperties.fromMaybe(false),
accessorPropertiesOnly.fromMaybe(false),
+ nonIndexedPropertiesOnly.fromMaybe(false),
generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
: WrapMode::kNoPreview,
result, exceptionDetails);
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index eadc596ca3..0ab39e8da2 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -35,11 +35,16 @@
#include <set>
#include <unordered_map>
-#include "include/v8.h"
+#include "include/v8-persistent-handle.h"
+// #include "include/v8-function-callback.h"
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
+namespace v8 {
+class Script;
+} // namespace v8
+
namespace v8_inspector {
class InjectedScript;
@@ -88,6 +93,7 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
Response getProperties(
const String16& objectId, Maybe<bool> ownProperties,
Maybe<bool> accessorPropertiesOnly, Maybe<bool> generatePreview,
+ Maybe<bool> nonIndexedPropertiesOnly,
std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
result,
Maybe<protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index cd86659fdb..aaad7ab6b3 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -9,11 +9,16 @@
#include <vector>
#include "include/v8-inspector.h"
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/base/macros.h"
#include "src/inspector/protocol/Runtime.h"
#include "src/inspector/string-16.h"
+namespace v8 {
+class StackFrame;
+class StackTrace;
+} // namespace v8
+
namespace v8_inspector {
class AsyncStackTrace;
diff --git a/deps/v8/src/inspector/v8-value-utils.cc b/deps/v8/src/inspector/v8-value-utils.cc
index dd73c2919d..4b9f0b7a1a 100644
--- a/deps/v8/src/inspector/v8-value-utils.cc
+++ b/deps/v8/src/inspector/v8-value-utils.cc
@@ -4,6 +4,10 @@
#include "src/inspector/v8-value-utils.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+
namespace v8_inspector {
v8::Maybe<bool> createDataProperty(v8::Local<v8::Context> context,
diff --git a/deps/v8/src/inspector/v8-value-utils.h b/deps/v8/src/inspector/v8-value-utils.h
index 6817d9fbb6..7eae23d9b1 100644
--- a/deps/v8/src/inspector/v8-value-utils.h
+++ b/deps/v8/src/inspector/v8-value-utils.h
@@ -5,10 +5,9 @@
#ifndef V8_INSPECTOR_V8_VALUE_UTILS_H_
#define V8_INSPECTOR_V8_VALUE_UTILS_H_
+#include "include/v8-local-handle.h"
#include "src/inspector/protocol/Protocol.h"
-#include "include/v8.h"
-
namespace v8_inspector {
v8::Maybe<bool> createDataProperty(v8::Local<v8::Context>,
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 78078f4c17..57eebb0c80 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -7,6 +7,15 @@
#include <algorithm>
#include <cmath>
+#include "include/v8-container.h"
+#include "include/v8-date.h"
+#include "include/v8-function.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-primitive-object.h"
+#include "include/v8-proxy.h"
+#include "include/v8-regexp.h"
+#include "include/v8-typed-array.h"
+#include "include/v8-wasm.h"
#include "src/base/optional.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/v8-debugger.h"
@@ -786,7 +795,7 @@ class PreviewPropertyAccumulator : public ValueMirror::PropertyAccumulator {
!mirror.value) {
return true;
}
- if (!mirror.isOwn) return true;
+ if (!mirror.isOwn && !mirror.isSynthetic) return true;
if (std::find(m_blocklist.begin(), m_blocklist.end(), mirror.name) !=
m_blocklist.end()) {
return true;
@@ -844,7 +853,7 @@ bool getPropertiesForPreview(v8::Local<v8::Context> context,
: -1;
PreviewPropertyAccumulator accumulator(blocklist, skipIndex, nameLimit,
indexLimit, overflow, properties);
- return ValueMirror::getProperties(context, object, false, false,
+ return ValueMirror::getProperties(context, object, false, false, false,
&accumulator);
}
@@ -1178,6 +1187,7 @@ ValueMirror::~ValueMirror() = default;
bool ValueMirror::getProperties(v8::Local<v8::Context> context,
v8::Local<v8::Object> object,
bool ownProperties, bool accessorPropertiesOnly,
+ bool nonIndexedPropertiesOnly,
PropertyAccumulator* accumulator) {
v8::Isolate* isolate = context->GetIsolate();
v8::TryCatch tryCatch(isolate);
@@ -1209,6 +1219,14 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
while (!iterator->Done()) {
bool isOwn = iterator->is_own();
if (!isOwn && ownProperties) break;
+ bool isIndex = iterator->is_array_index();
+ if (isIndex && nonIndexedPropertiesOnly) {
+ if (!iterator->Advance().FromMaybe(false)) {
+ CHECK(tryCatch.HasCaught());
+ return false;
+ }
+ continue;
+ }
v8::Local<v8::Name> v8Name = iterator->name();
v8::Maybe<bool> result = set->Has(context, v8Name);
if (result.IsNothing()) return false;
@@ -1287,10 +1305,14 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
if (v8::debug::CallFunctionOn(context, getterFunction, object, 0,
nullptr, true)
.ToLocal(&value)) {
- valueMirror = ValueMirror::create(context, value);
- isOwn = true;
- setterMirror = nullptr;
- getterMirror = nullptr;
+ if (value->IsPromise() &&
+ value.As<v8::Promise>()->State() == v8::Promise::kRejected) {
+ value.As<v8::Promise>()->MarkAsHandled();
+ } else {
+ valueMirror = ValueMirror::create(context, value);
+ setterMirror = nullptr;
+ getterMirror = nullptr;
+ }
}
}
}
@@ -1302,7 +1324,8 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
configurable,
enumerable,
isOwn,
- iterator->is_array_index(),
+ isIndex,
+ isAccessorProperty && valueMirror,
std::move(valueMirror),
std::move(getterMirror),
std::move(setterMirror),
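The getter-preview change above leans on the public promise API: if speculatively invoking a getter yields an already-rejected promise, the mirror now marks it as handled instead of reporting the getter's value, so the inspector's probe does not surface a spurious unhandled rejection. A hedged standalone sketch of that check (only the v8::Promise calls come from the real API; the helper name is invented):

#include "include/v8-local-handle.h"
#include "include/v8-promise.h"

void IgnoreRejectedGetterResult(v8::Local<v8::Value> value) {
  if (!value->IsPromise()) return;
  v8::Local<v8::Promise> promise = value.As<v8::Promise>();
  if (promise->State() == v8::Promise::kRejected) {
    // Suppress the unhandled-rejection event this speculative call would
    // otherwise generate.
    promise->MarkAsHandled();
  }
}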
diff --git a/deps/v8/src/inspector/value-mirror.h b/deps/v8/src/inspector/value-mirror.h
index 88b4ad2711..721695e74d 100644
--- a/deps/v8/src/inspector/value-mirror.h
+++ b/deps/v8/src/inspector/value-mirror.h
@@ -8,7 +8,7 @@
#include <memory>
#include "include/v8-inspector.h"
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/base/macros.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/protocol/Runtime.h"
@@ -38,6 +38,7 @@ struct PropertyMirror {
bool enumerable;
bool isOwn;
bool isIndex;
+ bool isSynthetic;
std::unique_ptr<ValueMirror> value;
std::unique_ptr<ValueMirror> getter;
std::unique_ptr<ValueMirror> setter;
@@ -74,6 +75,7 @@ class ValueMirror {
static bool getProperties(v8::Local<v8::Context> context,
v8::Local<v8::Object> object, bool ownProperties,
bool accessorPropertiesOnly,
+ bool nonIndexedPropertiesOnly,
PropertyAccumulator* accumulator);
static void getInternalProperties(
v8::Local<v8::Context> context, v8::Local<v8::Object> object,
diff --git a/deps/v8/src/interpreter/OWNERS b/deps/v8/src/interpreter/OWNERS
index 481caea50b..e61606034b 100644
--- a/deps/v8/src/interpreter/OWNERS
+++ b/deps/v8/src/interpreter/OWNERS
@@ -1,3 +1,2 @@
leszeks@chromium.org
-mythria@chromium.org
-rmcilroy@chromium.org
+syg@chromium.org
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index f78330bea1..9536df172d 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -8,6 +8,7 @@
#include <unordered_map>
#include <unordered_set>
+#include "include/v8-extension.h"
#include "src/api/api-inl.h"
#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
@@ -2525,7 +2526,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
const AstRawString* class_name =
expr->scope()->class_variable() != nullptr
? expr->scope()->class_variable()->raw_name()
- : ast_string_constants()->empty_string();
+ : ast_string_constants()->anonymous_string();
builder()
->LoadLiteral(class_name)
.StoreAccumulatorInRegister(brand)
@@ -3647,8 +3648,7 @@ void BytecodeGenerator::BuildVariableAssignment(
break;
}
case VariableLocation::UNALLOCATED: {
- FeedbackSlot slot = GetCachedStoreGlobalICSlot(language_mode(), variable);
- builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
+ BuildStoreGlobal(variable);
break;
}
case VariableLocation::CONTEXT: {
@@ -3737,9 +3737,7 @@ void BytecodeGenerator::BuildVariableAssignment(
if (mode == VariableMode::kConst) {
builder()->CallRuntime(Runtime::kThrowConstAssignError);
} else {
- FeedbackSlot slot =
- GetCachedStoreGlobalICSlot(language_mode(), variable);
- builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
+ BuildStoreGlobal(variable);
}
}
break;
@@ -3772,6 +3770,21 @@ void BytecodeGenerator::BuildStoreNamedProperty(const Expression* object_expr,
}
}
+void BytecodeGenerator::BuildStoreGlobal(Variable* variable) {
+ Register value;
+ if (!execution_result()->IsEffect()) {
+ value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ }
+
+ FeedbackSlot slot = GetCachedStoreGlobalICSlot(language_mode(), variable);
+ builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
+
+ if (!execution_result()->IsEffect()) {
+ builder()->LoadAccumulatorWithRegister(value);
+ }
+}
+
// static
BytecodeGenerator::AssignmentLhsData
BytecodeGenerator::AssignmentLhsData::NonProperty(Expression* expr) {
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 01f4b2a5b6..d3cc86acf5 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -241,6 +241,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
const AstRawString* name);
void BuildStoreNamedProperty(const Expression* object_expr, Register object,
const AstRawString* name);
+ void BuildStoreGlobal(Variable* variable);
void BuildVariableLoad(Variable* variable, HoleCheckMode hole_check_mode,
TypeofMode typeof_mode = TypeofMode::kNotInside);
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index d938aff5a5..61734b9044 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -106,7 +106,7 @@ namespace interpreter {
OperandType::kIdx) \
V(LdaGlobalInsideTypeof, ImplicitRegisterUse::kWriteAccumulator, \
OperandType::kIdx, OperandType::kIdx) \
- V(StaGlobal, ImplicitRegisterUse::kReadAccumulator, OperandType::kIdx, \
+ V(StaGlobal, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kIdx, \
OperandType::kIdx) \
\
/* Context operations */ \
@@ -393,7 +393,7 @@ namespace interpreter {
\
/* Complex flow control For..in */ \
V(ForInEnumerate, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg) \
- V(ForInPrepare, ImplicitRegisterUse::kReadAccumulator, \
+ V(ForInPrepare, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kRegOutTriple, OperandType::kIdx) \
V(ForInContinue, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
OperandType::kReg) \
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index c6d6e44a2f..49e4fad1fb 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -157,7 +157,7 @@ TNode<Object> InterpreterAssembler::GetAccumulator() {
DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
implicit_register_use_ =
implicit_register_use_ | ImplicitRegisterUse::kReadAccumulator;
- return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
+ return GetAccumulatorUnchecked();
}
void InterpreterAssembler::SetAccumulator(TNode<Object> value) {
@@ -204,8 +204,8 @@ TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
TNode<IntPtrT> InterpreterAssembler::RegisterLocation(
TNode<IntPtrT> reg_index) {
- return Signed(WordPoisonOnSpeculation(
- IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index))));
+ return Signed(
+ IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)));
}
TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Register reg) {
@@ -218,8 +218,7 @@ TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(TNode<IntPtrT> index) {
TNode<Object> InterpreterAssembler::LoadRegister(TNode<IntPtrT> reg_index) {
return LoadFullTagged(GetInterpretedFramePointer(),
- RegisterFrameOffset(reg_index),
- LoadSensitivity::kCritical);
+ RegisterFrameOffset(reg_index));
}
TNode<Object> InterpreterAssembler::LoadRegister(Register reg) {
@@ -242,16 +241,14 @@ TNode<IntPtrT> InterpreterAssembler::LoadAndUntagRegister(Register reg) {
TNode<Object> InterpreterAssembler::LoadRegisterAtOperandIndex(
int operand_index) {
- return LoadRegister(
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
+ return LoadRegister(BytecodeOperandReg(operand_index));
}
std::pair<TNode<Object>, TNode<Object>>
InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) {
DCHECK_EQ(OperandType::kRegPair,
Bytecodes::GetOperandType(bytecode_, operand_index));
- TNode<IntPtrT> first_reg_index =
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
+ TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
return std::make_pair(LoadRegister(first_reg_index),
LoadRegister(second_reg_index));
@@ -263,8 +260,7 @@ InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
Bytecodes::GetOperandType(bytecode_, operand_index)));
DCHECK_EQ(OperandType::kRegCount,
Bytecodes::GetOperandType(bytecode_, operand_index + 1));
- TNode<IntPtrT> base_reg = RegisterLocation(
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
+ TNode<IntPtrT> base_reg = RegisterLocation(BytecodeOperandReg(operand_index));
TNode<Uint32T> reg_count = BytecodeOperandCount(operand_index + 1);
return RegListNodePair(base_reg, reg_count);
}
@@ -272,7 +268,6 @@ InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
TNode<Object> InterpreterAssembler::LoadRegisterFromRegisterList(
const RegListNodePair& reg_list, int index) {
TNode<IntPtrT> location = RegisterLocationInRegisterList(reg_list, index);
- // Location is already poisoned on speculation, so no need to poison here.
return LoadFullTagged(location);
}
@@ -329,8 +324,7 @@ void InterpreterAssembler::StoreRegisterForShortStar(TNode<Object> value,
void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode<Object> value,
int operand_index) {
- StoreRegister(value,
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
+ StoreRegister(value, BytecodeOperandReg(operand_index));
}
void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1,
@@ -338,8 +332,7 @@ void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1,
int operand_index) {
DCHECK_EQ(OperandType::kRegOutPair,
Bytecodes::GetOperandType(bytecode_, operand_index));
- TNode<IntPtrT> first_reg_index =
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
+ TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
StoreRegister(value1, first_reg_index);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
StoreRegister(value2, second_reg_index);
@@ -350,8 +343,7 @@ void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
int operand_index) {
DCHECK_EQ(OperandType::kRegOutTriple,
Bytecodes::GetOperandType(bytecode_, operand_index));
- TNode<IntPtrT> first_reg_index =
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
+ TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
StoreRegister(value1, first_reg_index);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
StoreRegister(value2, second_reg_index);
@@ -370,30 +362,27 @@ TNode<IntPtrT> InterpreterAssembler::OperandOffset(int operand_index) {
}
TNode<Uint8T> InterpreterAssembler::BytecodeOperandUnsignedByte(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
return Load<Uint8T>(BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), operand_offset),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), operand_offset));
}
TNode<Int8T> InterpreterAssembler::BytecodeOperandSignedByte(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
return Load<Int8T>(BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), operand_offset),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), operand_offset));
}
TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
- int relative_offset, MachineType result_type,
- LoadSensitivity needs_poisoning) {
+ int relative_offset, MachineType result_type) {
static const int kMaxCount = 4;
DCHECK(!TargetSupportsUnalignedAccess());
@@ -430,9 +419,8 @@ TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
TNode<IntPtrT> offset =
IntPtrConstant(relative_offset + msb_offset + i * kStep);
TNode<IntPtrT> array_offset = IntPtrAdd(BytecodeOffset(), offset);
- bytes[i] =
- UncheckedCast<Word32T>(Load(machine_type, BytecodeArrayTaggedPointer(),
- array_offset, needs_poisoning));
+ bytes[i] = UncheckedCast<Word32T>(
+ Load(machine_type, BytecodeArrayTaggedPointer(), array_offset));
}
// Pack LSB to MSB.
@@ -446,7 +434,7 @@ TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
}
TNode<Uint16T> InterpreterAssembler::BytecodeOperandUnsignedShort(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(
OperandSize::kShort,
@@ -456,16 +444,15 @@ TNode<Uint16T> InterpreterAssembler::BytecodeOperandUnsignedShort(
if (TargetSupportsUnalignedAccess()) {
return Load<Uint16T>(
BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return UncheckedCast<Uint16T>(BytecodeOperandReadUnaligned(
- operand_offset, MachineType::Uint16(), needs_poisoning));
+ return UncheckedCast<Uint16T>(
+ BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16()));
}
}
TNode<Int16T> InterpreterAssembler::BytecodeOperandSignedShort(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(
OperandSize::kShort,
@@ -475,16 +462,15 @@ TNode<Int16T> InterpreterAssembler::BytecodeOperandSignedShort(
if (TargetSupportsUnalignedAccess()) {
return Load<Int16T>(
BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return UncheckedCast<Int16T>(BytecodeOperandReadUnaligned(
- operand_offset, MachineType::Int16(), needs_poisoning));
+ return UncheckedCast<Int16T>(
+ BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16()));
}
}
TNode<Uint32T> InterpreterAssembler::BytecodeOperandUnsignedQuad(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -493,16 +479,15 @@ TNode<Uint32T> InterpreterAssembler::BytecodeOperandUnsignedQuad(
if (TargetSupportsUnalignedAccess()) {
return Load<Uint32T>(
BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return UncheckedCast<Uint32T>(BytecodeOperandReadUnaligned(
- operand_offset, MachineType::Uint32(), needs_poisoning));
+ return UncheckedCast<Uint32T>(
+ BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32()));
}
}
TNode<Int32T> InterpreterAssembler::BytecodeOperandSignedQuad(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -511,43 +496,40 @@ TNode<Int32T> InterpreterAssembler::BytecodeOperandSignedQuad(
if (TargetSupportsUnalignedAccess()) {
return Load<Int32T>(
BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return UncheckedCast<Int32T>(BytecodeOperandReadUnaligned(
- operand_offset, MachineType::Int32(), needs_poisoning));
+ return UncheckedCast<Int32T>(
+ BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32()));
}
}
TNode<Int32T> InterpreterAssembler::BytecodeSignedOperand(
- int operand_index, OperandSize operand_size,
- LoadSensitivity needs_poisoning) {
+ int operand_index, OperandSize operand_size) {
DCHECK(!Bytecodes::IsUnsignedOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
switch (operand_size) {
case OperandSize::kByte:
- return BytecodeOperandSignedByte(operand_index, needs_poisoning);
+ return BytecodeOperandSignedByte(operand_index);
case OperandSize::kShort:
- return BytecodeOperandSignedShort(operand_index, needs_poisoning);
+ return BytecodeOperandSignedShort(operand_index);
case OperandSize::kQuad:
- return BytecodeOperandSignedQuad(operand_index, needs_poisoning);
+ return BytecodeOperandSignedQuad(operand_index);
case OperandSize::kNone:
UNREACHABLE();
}
}
TNode<Uint32T> InterpreterAssembler::BytecodeUnsignedOperand(
- int operand_index, OperandSize operand_size,
- LoadSensitivity needs_poisoning) {
+ int operand_index, OperandSize operand_size) {
DCHECK(Bytecodes::IsUnsignedOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
switch (operand_size) {
case OperandSize::kByte:
- return BytecodeOperandUnsignedByte(operand_index, needs_poisoning);
+ return BytecodeOperandUnsignedByte(operand_index);
case OperandSize::kShort:
- return BytecodeOperandUnsignedShort(operand_index, needs_poisoning);
+ return BytecodeOperandUnsignedShort(operand_index);
case OperandSize::kQuad:
- return BytecodeOperandUnsignedQuad(operand_index, needs_poisoning);
+ return BytecodeOperandUnsignedQuad(operand_index);
case OperandSize::kNone:
UNREACHABLE();
}
@@ -629,23 +611,22 @@ TNode<TaggedIndex> InterpreterAssembler::BytecodeOperandIdxTaggedIndex(
}
TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_EQ(OperandType::kIdx,
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
return ChangeUint32ToWord(
- BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning));
+ BytecodeUnsignedOperand(operand_index, operand_size));
}
-TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg(
- int operand_index, LoadSensitivity needs_poisoning) {
+TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg(int operand_index) {
DCHECK(Bytecodes::IsRegisterOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
return ChangeInt32ToIntPtr(
- BytecodeSignedOperand(operand_index, operand_size, needs_poisoning));
+ BytecodeSignedOperand(operand_index, operand_size));
}
TNode<Uint32T> InterpreterAssembler::BytecodeOperandRuntimeId(
@@ -682,8 +663,7 @@ TNode<Object> InterpreterAssembler::LoadConstantPoolEntry(TNode<WordT> index) {
TNode<FixedArray> constant_pool = CAST(LoadObjectField(
BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
return UnsafeLoadFixedArrayElement(constant_pool,
- UncheckedCast<IntPtrT>(index), 0,
- LoadSensitivity::kCritical);
+ UncheckedCast<IntPtrT>(index), 0);
}
TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
@@ -693,8 +673,7 @@ TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
TNode<Object> InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
int operand_index) {
- TNode<UintPtrT> index =
- BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe);
+ TNode<UintPtrT> index = BytecodeOperandConstantPoolIdx(operand_index);
return LoadConstantPoolEntry(index);
}
@@ -733,14 +712,16 @@ void InterpreterAssembler::CallJSAndDispatch(
bytecode_ == Bytecode::kInvokeIntrinsic);
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
- TNode<Word32T> args_count;
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // The receiver is implied, so it is not in the argument list.
- args_count = args.reg_count();
- } else {
- // Subtract the receiver from the argument count.
+ TNode<Word32T> args_count = args.reg_count();
+ const bool receiver_included =
+ receiver_mode != ConvertReceiverMode::kNullOrUndefined;
+ if (kJSArgcIncludesReceiver && !receiver_included) {
+ // Add receiver if we want to include it in argc and it isn't already.
+ args_count = Int32Add(args_count, Int32Constant(kJSArgcReceiverSlots));
+ } else if (!kJSArgcIncludesReceiver && receiver_included) {
+ // Subtract receiver if we don't want to include it, but it is included.
TNode<Int32T> receiver_count = Int32Constant(1);
- args_count = Int32Sub(args.reg_count(), receiver_count);
+ args_count = Int32Sub(args_count, receiver_count);
}
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
@@ -768,6 +749,7 @@ void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
Callable callable = CodeFactory::Call(isolate());
TNode<Code> code_target = HeapConstant(callable.code());
+ arg_count = JSParameterCount(arg_count);
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// The first argument parameter (the receiver) is implied to be undefined.
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
@@ -812,8 +794,11 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch(
InterpreterPushArgsMode::kWithFinalSpread);
TNode<Code> code_target = HeapConstant(callable.code());
- TNode<Int32T> receiver_count = Int32Constant(1);
- TNode<Word32T> args_count = Int32Sub(args.reg_count(), receiver_count);
+ TNode<Word32T> args_count = args.reg_count();
+ if (!kJSArgcIncludesReceiver) {
+ TNode<Int32T> receiver_count = Int32Constant(1);
+ args_count = Int32Sub(args_count, receiver_count);
+ }
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
args_count, args.base_reg_location(),
function);
@@ -832,6 +817,7 @@ TNode<Object> InterpreterAssembler::Construct(
Label return_result(this), construct_generic(this),
construct_array(this, &var_site);
+ TNode<Word32T> args_count = JSParameterCount(args.reg_count());
CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
slot_id, UpdateFeedbackMode::kOptionalFeedback,
&construct_generic, &construct_array, &var_site);
@@ -843,7 +829,7 @@ TNode<Object> InterpreterAssembler::Construct(
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kOther);
var_result =
- CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ CallStub(callable, context, args_count, args.base_reg_location(),
target, new_target, UndefinedConstant());
Goto(&return_result);
}
@@ -856,7 +842,7 @@ TNode<Object> InterpreterAssembler::Construct(
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kArrayFunction);
var_result =
- CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ CallStub(callable, context, args_count, args.base_reg_location(),
target, new_target, var_site.value());
Goto(&return_result);
}
@@ -982,7 +968,8 @@ TNode<Object> InterpreterAssembler::ConstructWithSpread(
Comment("call using ConstructWithSpread builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kWithFinalSpread);
- return CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ TNode<Word32T> args_count = JSParameterCount(args.reg_count());
+ return CallStub(callable, context, args_count, args.base_reg_location(),
target, new_target, UndefinedConstant());
}
@@ -1224,13 +1211,9 @@ void InterpreterAssembler::DispatchToBytecode(
void InterpreterAssembler::DispatchToBytecodeHandlerEntry(
TNode<RawPtrT> handler_entry, TNode<IntPtrT> bytecode_offset) {
- // Propagate speculation poisoning.
- TNode<RawPtrT> poisoned_handler_entry =
- UncheckedCast<RawPtrT>(WordPoisonOnSpeculation(handler_entry));
- TailCallBytecodeDispatch(InterpreterDispatchDescriptor{},
- poisoned_handler_entry, GetAccumulatorUnchecked(),
- bytecode_offset, BytecodeArrayTaggedPointer(),
- DispatchTablePointer());
+ TailCallBytecodeDispatch(
+ InterpreterDispatchDescriptor{}, handler_entry, GetAccumulatorUnchecked(),
+ bytecode_offset, BytecodeArrayTaggedPointer(), DispatchTablePointer());
}
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
@@ -1325,7 +1308,7 @@ void InterpreterAssembler::OnStackReplacement(TNode<Context> context,
TNode<Uint16T> data_type = LoadInstanceType(CAST(sfi_data));
Label baseline(this);
- GotoIf(InstanceTypeEqual(data_type, BASELINE_DATA_TYPE), &baseline);
+ GotoIf(InstanceTypeEqual(data_type, CODET_TYPE), &baseline);
{
Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
CallStub(callable, context);
@@ -1382,7 +1365,7 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \
- V8_TARGET_ARCH_PPC64
+ V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
return true;
#else
#error "Unknown Architecture"
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index bf4641200b..d89c05e2d3 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -308,51 +308,32 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// The |result_type| determines the size and signedness of the
// value read. This method should only be used on architectures that
// do not support unaligned memory accesses.
- TNode<Word32T> BytecodeOperandReadUnaligned(
- int relative_offset, MachineType result_type,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ TNode<Word32T> BytecodeOperandReadUnaligned(int relative_offset,
+ MachineType result_type);
// Returns zero- or sign-extended to word32 value of the operand.
- TNode<Uint8T> BytecodeOperandUnsignedByte(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Int8T> BytecodeOperandSignedByte(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Uint16T> BytecodeOperandUnsignedShort(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Int16T> BytecodeOperandSignedShort(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Uint32T> BytecodeOperandUnsignedQuad(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Int32T> BytecodeOperandSignedQuad(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ TNode<Uint8T> BytecodeOperandUnsignedByte(int operand_index);
+ TNode<Int8T> BytecodeOperandSignedByte(int operand_index);
+ TNode<Uint16T> BytecodeOperandUnsignedShort(int operand_index);
+ TNode<Int16T> BytecodeOperandSignedShort(int operand_index);
+ TNode<Uint32T> BytecodeOperandUnsignedQuad(int operand_index);
+ TNode<Int32T> BytecodeOperandSignedQuad(int operand_index);
// Returns zero- or sign-extended to word32 value of the operand of
// given size.
- TNode<Int32T> BytecodeSignedOperand(
- int operand_index, OperandSize operand_size,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Uint32T> BytecodeUnsignedOperand(
- int operand_index, OperandSize operand_size,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ TNode<Int32T> BytecodeSignedOperand(int operand_index,
+ OperandSize operand_size);
+ TNode<Uint32T> BytecodeUnsignedOperand(int operand_index,
+ OperandSize operand_size);
// Returns the word-size sign-extended register index for bytecode operand
- // |operand_index| in the current bytecode. Value is not poisoned on
- // speculation since the value loaded from the register is poisoned instead.
- TNode<IntPtrT> BytecodeOperandReg(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ // |operand_index| in the current bytecode.
+ TNode<IntPtrT> BytecodeOperandReg(int operand_index);
// Returns the word zero-extended index immediate for bytecode operand
- // |operand_index| in the current bytecode for use when loading a .
- TNode<UintPtrT> BytecodeOperandConstantPoolIdx(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ // |operand_index| in the current bytecode for use when loading a constant
+ // pool element.
+ TNode<UintPtrT> BytecodeOperandConstantPoolIdx(int operand_index);
// Jump relative to the current bytecode by the |jump_offset|. If |backward|,
// then jump backward (subtract the offset), otherwise jump forward (add the
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index e010ab2f64..fb23f90841 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -236,8 +236,14 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
- CallBuiltin(Builtin::kStoreGlobalIC, context, name, value, slot,
- maybe_vector);
+ TNode<Object> result = CallBuiltin(Builtin::kStoreGlobalIC, context, name,
+ value, slot, maybe_vector);
+ // To avoid special logic in the deoptimizer to re-materialize the value in
+ // the accumulator, we overwrite the accumulator after the IC call. It
+ // doesn't really matter what we write to the accumulator here, since we
+ // restore to the correct value on the outside. Storing the result means we
+ // don't need to keep unnecessary state alive across the callstub.
+ SetAccumulator(result);
Dispatch();
}
@@ -598,14 +604,14 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- TVARIABLE(Object, var_result);
- var_result = CallStub(ic, context, object, name, value, slot, maybe_vector);
+ TNode<Object> result =
+ CallStub(ic, context, object, name, value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(var_result.value());
+ SetAccumulator(result);
Dispatch();
}
};
@@ -642,15 +648,14 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- TVARIABLE(Object, var_result);
- var_result = CallBuiltin(Builtin::kKeyedStoreIC, context, object, name, value,
- slot, maybe_vector);
+ TNode<Object> result = CallBuiltin(Builtin::kKeyedStoreIC, context, object,
+ name, value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(var_result.value());
+ SetAccumulator(result);
Dispatch();
}
@@ -666,15 +671,15 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- TVARIABLE(Object, var_result);
- var_result = CallBuiltin(Builtin::kStoreInArrayLiteralIC, context, array,
- index, value, slot, feedback_vector);
+ TNode<Object> result =
+ CallBuiltin(Builtin::kStoreInArrayLiteralIC, context, array, index, value,
+ slot, feedback_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(var_result.value());
+ SetAccumulator(result);
Dispatch();
}
@@ -2834,6 +2839,11 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
ForInPrepare(enumerator, vector_index, maybe_feedback_vector, &cache_array,
&cache_length, UpdateFeedbackMode::kOptionalFeedback);
+ // The accumulator is clobbered soon after ForInPrepare, so avoid keeping it
+ // alive too long and instead set it to cache_array to match the first return
+ // value of Builtin::kForInPrepare.
+ SetAccumulator(cache_array);
+
StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch();
}
@@ -2970,8 +2980,8 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset));
- TNode<Int32T> formal_parameter_count = LoadObjectField<Uint16T>(
- shared, SharedFunctionInfo::kFormalParameterCountOffset);
+ TNode<Int32T> formal_parameter_count =
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared);
ExportParametersAndRegisterFile(array, registers, formal_parameter_count);
StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
@@ -3046,8 +3056,8 @@ IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset));
- TNode<Int32T> formal_parameter_count = LoadObjectField<Uint16T>(
- shared, SharedFunctionInfo::kFormalParameterCountOffset);
+ TNode<Int32T> formal_parameter_count =
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared);
ImportRegisterFile(
CAST(LoadObjectField(generator,
@@ -3074,9 +3084,6 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, const char* debug_name,
compiler::CodeAssemblerState state(
isolate, &zone, InterpreterDispatchDescriptor{},
CodeKind::BYTECODE_HANDLER, debug_name,
- FLAG_untrusted_code_mitigations
- ? PoisoningMitigationLevel::kPoisonCriticalOnly
- : PoisoningMitigationLevel::kDontPoison,
builtin);
switch (bytecode) {
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index a874954157..88d7706c72 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -12,6 +12,7 @@
#include "src/ast/scopes.h"
#include "src/codegen/compiler.h"
#include "src/codegen/unoptimized-compilation-info.h"
+#include "src/common/globals.h"
#include "src/execution/local-isolate.h"
#include "src/heap/parked-scope.h"
#include "src/init/bootstrapper.h"
@@ -389,11 +390,9 @@ uintptr_t Interpreter::GetDispatchCounter(Bytecode from, Bytecode to) const {
to_index];
}
-Local<v8::Object> Interpreter::GetDispatchCountersObject() {
- v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- Local<v8::Context> context = isolate->GetCurrentContext();
-
- Local<v8::Object> counters_map = v8::Object::New(isolate);
+Handle<JSObject> Interpreter::GetDispatchCountersObject() {
+ Handle<JSObject> counters_map =
+ isolate_->factory()->NewJSObjectWithNullProto();
// Output is a JSON-encoded object of objects.
//
@@ -408,30 +407,23 @@ Local<v8::Object> Interpreter::GetDispatchCountersObject() {
for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) {
Bytecode from_bytecode = Bytecodes::FromByte(from_index);
- Local<v8::Object> counters_row = v8::Object::New(isolate);
+ Handle<JSObject> counters_row =
+ isolate_->factory()->NewJSObjectWithNullProto();
for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) {
Bytecode to_bytecode = Bytecodes::FromByte(to_index);
uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode);
if (counter > 0) {
- std::string to_name = Bytecodes::ToString(to_bytecode);
- Local<v8::String> to_name_object =
- v8::String::NewFromUtf8(isolate, to_name.c_str()).ToLocalChecked();
- Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
- CHECK(counters_row
- ->DefineOwnProperty(context, to_name_object, counter_object)
- .IsJust());
+ Handle<Object> value = isolate_->factory()->NewNumberFromSize(counter);
+ JSObject::AddProperty(isolate_, counters_row,
+ Bytecodes::ToString(to_bytecode), value, NONE);
}
}
- std::string from_name = Bytecodes::ToString(from_bytecode);
- Local<v8::String> from_name_object =
- v8::String::NewFromUtf8(isolate, from_name.c_str()).ToLocalChecked();
-
- CHECK(
- counters_map->DefineOwnProperty(context, from_name_object, counters_row)
- .IsJust());
+ JSObject::AddProperty(isolate_, counters_map,
+ Bytecodes::ToString(from_bytecode), counters_row,
+ NONE);
}
return counters_map;
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 95a3c4ef79..9daa886e65 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -72,7 +72,7 @@ class Interpreter {
// Disassembler support.
V8_EXPORT_PRIVATE const char* LookupNameOfBytecodeHandler(const Code code);
- V8_EXPORT_PRIVATE Local<v8::Object> GetDispatchCountersObject();
+ V8_EXPORT_PRIVATE Handle<JSObject> GetDispatchCountersObject();
void ForEachBytecode(const std::function<void(Bytecode, OperandScale)>& f);
diff --git a/deps/v8/src/json/json-parser.h b/deps/v8/src/json/json-parser.h
index 03e7537512..4819f9d64e 100644
--- a/deps/v8/src/json/json-parser.h
+++ b/deps/v8/src/json/json-parser.h
@@ -5,6 +5,7 @@
#ifndef V8_JSON_JSON_PARSER_H_
#define V8_JSON_JSON_PARSER_H_
+#include "include/v8-callbacks.h"
#include "src/base/small-vector.h"
#include "src/base/strings.h"
#include "src/execution/isolate.h"
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 66057e1a39..1cbc01193d 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -8,6 +8,7 @@
#include <queue>
#include "include/libplatform/libplatform.h"
+#include "src/base/bounded-page-allocator.h"
#include "src/base/debug/stack_trace.h"
#include "src/base/logging.h"
#include "src/base/page-allocator.h"
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 49c8406533..fb94972b85 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -4,6 +4,9 @@
#include "src/libsampler/sampler.h"
+#include "include/v8-isolate.h"
+#include "include/v8-unwinder.h"
+
#ifdef USE_SIGNALS
#include <errno.h>
@@ -412,6 +415,10 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
state->pc = reinterpret_cast<void*>(mcontext.pc);
state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
+#elif V8_HOST_ARCH_LOONG64
+ state->pc = reinterpret_cast<void*>(mcontext.__pc);
+ state->sp = reinterpret_cast<void*>(mcontext.__gregs[3]);
+ state->fp = reinterpret_cast<void*>(mcontext.__gregs[22]);
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
#if V8_LIBC_GLIBC
state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
diff --git a/deps/v8/src/libsampler/sampler.h b/deps/v8/src/libsampler/sampler.h
index 35bcf23546..98c0606151 100644
--- a/deps/v8/src/libsampler/sampler.h
+++ b/deps/v8/src/libsampler/sampler.h
@@ -8,8 +8,8 @@
#include <atomic>
#include <memory>
#include <unordered_map>
+#include <vector>
-#include "include/v8.h"
#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
@@ -18,6 +18,10 @@
#endif
namespace v8 {
+
+class Isolate;
+struct RegisterState;
+
namespace sampler {
// ----------------------------------------------------------------------------
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index 3a2527f49c..08e35352cf 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -7,7 +7,7 @@
#include <memory>
-#include "include/v8.h"
+#include "include/v8-callbacks.h"
#include "src/base/atomic-utils.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index 4f6aa856d7..022d0e9c57 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -9,6 +9,7 @@
#include <memory>
#include <sstream>
+#include "include/v8-locker.h"
#include "src/api/api-inl.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
@@ -614,6 +615,8 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "ppc64";
#elif V8_TARGET_ARCH_MIPS
const char arch[] = "mips";
+#elif V8_TARGET_ARCH_LOONG64
+ const char arch[] = "loong64";
#elif V8_TARGET_ARCH_ARM64
const char arch[] = "arm64";
#elif V8_TARGET_ARCH_S390
@@ -730,7 +733,7 @@ void JitLogger::LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) {
JitCodeEvent event = {};
event.type = JitCodeEvent::CODE_ADDED;
- event.code_type = JitCodeEvent::JIT_CODE;
+ event.code_type = JitCodeEvent::WASM_CODE;
event.code_start = code->instructions().begin();
event.code_len = code->instructions().length();
event.name.str = name;
@@ -1558,12 +1561,14 @@ void Logger::CodeLinePosInfoRecordEvent(Address code_start,
CodeLinePosEvent(*jit_logger_, code_start, iter, code_type);
}
-void Logger::CodeLinePosInfoRecordEvent(
+#if V8_ENABLE_WEBASSEMBLY
+void Logger::WasmCodeLinePosInfoRecordEvent(
Address code_start, base::Vector<const byte> source_position_table) {
if (!jit_logger_) return;
SourcePositionTableIterator iter(source_position_table);
- CodeLinePosEvent(*jit_logger_, code_start, iter, JitCodeEvent::JIT_CODE);
+ CodeLinePosEvent(*jit_logger_, code_start, iter, JitCodeEvent::WASM_CODE);
}
+#endif // V8_ENABLE_WEBASSEMBLY
void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
if (code_name == nullptr) return; // Not a code object.
@@ -2217,12 +2222,11 @@ void ExistingCodeLogger::LogCompiledFunctions() {
Handle<AbstractCode>(
AbstractCode::cast(shared->InterpreterTrampoline()), isolate_));
}
- if (shared->HasBaselineData()) {
+ if (shared->HasBaselineCode()) {
LogExistingFunction(
- shared,
- Handle<AbstractCode>(
- AbstractCode::cast(shared->baseline_data().baseline_code()),
- isolate_));
+ shared, Handle<AbstractCode>(
+ AbstractCode::cast(shared->baseline_code(kAcquireLoad)),
+ isolate_));
}
if (pair.second.is_identical_to(BUILTIN_CODE(isolate_, CompileLazy)))
continue;
diff --git a/deps/v8/src/logging/log.h b/deps/v8/src/logging/log.h
index 612c2a2df7..b9e7a75c20 100644
--- a/deps/v8/src/logging/log.h
+++ b/deps/v8/src/logging/log.h
@@ -10,6 +10,7 @@
#include <set>
#include <string>
+#include "include/v8-callbacks.h"
#include "include/v8-profiler.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/execution/isolate.h"
@@ -241,8 +242,10 @@ class Logger : public CodeEventListener {
void CodeLinePosInfoRecordEvent(Address code_start,
ByteArray source_position_table,
JitCodeEvent::CodeType code_type);
- void CodeLinePosInfoRecordEvent(
+#if V8_ENABLE_WEBASSEMBLY
+ void WasmCodeLinePosInfoRecordEvent(
Address code_start, base::Vector<const byte> source_position_table);
+#endif // V8_ENABLE_WEBASSEMBLY
void CodeNameEvent(Address addr, int pos, const char* code_name);
diff --git a/deps/v8/src/logging/runtime-call-stats.cc b/deps/v8/src/logging/runtime-call-stats.cc
index 86e3215f74..66e26096d0 100644
--- a/deps/v8/src/logging/runtime-call-stats.cc
+++ b/deps/v8/src/logging/runtime-call-stats.cc
@@ -260,17 +260,6 @@ void RuntimeCallStats::Print(std::ostream& os) {
entries.Print(os);
}
-void RuntimeCallStats::EnumerateCounters(
- debug::RuntimeCallCounterCallback callback) {
- if (current_timer_.Value() != nullptr) {
- current_timer_.Value()->Snapshot();
- }
- for (int i = 0; i < kNumberOfCounters; i++) {
- RuntimeCallCounter* counter = GetCounter(i);
- callback(counter->name(), counter->count(), counter->time());
- }
-}
-
void RuntimeCallStats::Reset() {
if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
diff --git a/deps/v8/src/logging/runtime-call-stats.h b/deps/v8/src/logging/runtime-call-stats.h
index 5b3284a0c9..4e54e0ab71 100644
--- a/deps/v8/src/logging/runtime-call-stats.h
+++ b/deps/v8/src/logging/runtime-call-stats.h
@@ -5,8 +5,6 @@
#ifndef V8_LOGGING_RUNTIME_CALL_STATS_H_
#define V8_LOGGING_RUNTIME_CALL_STATS_H_
-#include "include/v8.h"
-
#ifdef V8_RUNTIME_CALL_STATS
#include "src/base/atomic-utils.h"
@@ -339,6 +337,7 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FrameElision) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, GenericLowering) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Inlining) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JSWasmInlining) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JumpThreading) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierPopulateReferenceMaps) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
@@ -597,9 +596,6 @@ class RuntimeCallStats final {
V8_EXPORT_PRIVATE void Print();
V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
- V8_EXPORT_PRIVATE void EnumerateCounters(
- debug::RuntimeCallCounterCallback callback);
-
ThreadId thread_id() const { return thread_id_; }
RuntimeCallTimer* current_timer() { return current_timer_.Value(); }
RuntimeCallCounter* current_counter() { return current_counter_.Value(); }
diff --git a/deps/v8/src/objects/allocation-site-inl.h b/deps/v8/src/objects/allocation-site-inl.h
index 9d17048958..1fc6709a5e 100644
--- a/deps/v8/src/objects/allocation-site-inl.h
+++ b/deps/v8/src/objects/allocation-site-inl.h
@@ -5,9 +5,9 @@
#ifndef V8_OBJECTS_ALLOCATION_SITE_INL_H_
#define V8_OBJECTS_ALLOCATION_SITE_INL_H_
-#include "src/objects/allocation-site.h"
-
+#include "src/common/globals.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/allocation-site.h"
#include "src/objects/js-objects-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -30,8 +30,7 @@ ACCESSORS(AllocationSite, transition_info_or_boilerplate, Object,
RELEASE_ACQUIRE_ACCESSORS(AllocationSite, transition_info_or_boilerplate,
Object, kTransitionInfoOrBoilerplateOffset)
ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
-IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(AllocationSite, pretenure_data,
- kPretenureDataOffset)
+RELAXED_INT32_ACCESSORS(AllocationSite, pretenure_data, kPretenureDataOffset)
INT32_ACCESSORS(AllocationSite, pretenure_create_count,
kPretenureCreateCountOffset)
ACCESSORS(AllocationSite, dependent_code, DependentCode, kDependentCodeOffset)
@@ -73,7 +72,7 @@ void AllocationSite::Initialize() {
set_transition_info_or_boilerplate(Smi::zero());
SetElementsKind(GetInitialFastElementsKind());
set_nested_site(Smi::zero());
- set_pretenure_data(0);
+ set_pretenure_data(0, kRelaxedStore);
set_pretenure_create_count(0);
set_dependent_code(
DependentCode::cast(GetReadOnlyRoots().empty_weak_fixed_array()),
@@ -139,36 +138,39 @@ inline bool AllocationSite::CanTrack(InstanceType type) {
}
AllocationSite::PretenureDecision AllocationSite::pretenure_decision() const {
- return PretenureDecisionBits::decode(pretenure_data());
+ return PretenureDecisionBits::decode(pretenure_data(kRelaxedLoad));
}
void AllocationSite::set_pretenure_decision(PretenureDecision decision) {
- int32_t value = pretenure_data();
- set_pretenure_data(PretenureDecisionBits::update(value, decision));
+ int32_t value = pretenure_data(kRelaxedLoad);
+ set_pretenure_data(PretenureDecisionBits::update(value, decision),
+ kRelaxedStore);
}
bool AllocationSite::deopt_dependent_code() const {
- return DeoptDependentCodeBit::decode(pretenure_data());
+ return DeoptDependentCodeBit::decode(pretenure_data(kRelaxedLoad));
}
void AllocationSite::set_deopt_dependent_code(bool deopt) {
- int32_t value = pretenure_data();
- set_pretenure_data(DeoptDependentCodeBit::update(value, deopt));
+ int32_t value = pretenure_data(kRelaxedLoad);
+ set_pretenure_data(DeoptDependentCodeBit::update(value, deopt),
+ kRelaxedStore);
}
int AllocationSite::memento_found_count() const {
- return MementoFoundCountBits::decode(pretenure_data());
+ return MementoFoundCountBits::decode(pretenure_data(kRelaxedLoad));
}
inline void AllocationSite::set_memento_found_count(int count) {
- int32_t value = pretenure_data();
+ int32_t value = pretenure_data(kRelaxedLoad);
// Verify that we can count more mementos than we can possibly find in one
// new space collection.
DCHECK((GetHeap()->MaxSemiSpaceSize() /
(Heap::kMinObjectSizeInTaggedWords * kTaggedSize +
AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
DCHECK_LT(count, MementoFoundCountBits::kMax);
- set_pretenure_data(MementoFoundCountBits::update(value, count));
+ set_pretenure_data(MementoFoundCountBits::update(value, count),
+ kRelaxedStore);
}
int AllocationSite::memento_create_count() const {
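
An aside on the hunk above: the pretenure_data accessors move from plain int32 reads and writes to the kRelaxedLoad/kRelaxedStore tags while keeping the bitfield decode/update shape. A minimal standalone sketch of that relaxed read-modify-write pattern, using std::atomic instead of V8's RELAXED_INT32_ACCESSORS machinery (every name below is a stand-in, not V8 API):

// Illustrative sketch only; mirrors the relaxed bitfield pattern above.
#include <atomic>
#include <cstdint>

// Hypothetical stand-in for a PretenureDecisionBits-style bitfield helper.
struct DecisionBits {
  static constexpr uint32_t kShift = 0;
  static constexpr uint32_t kMask = 0x3;
  static int decode(int32_t value) {
    return static_cast<int>((static_cast<uint32_t>(value) >> kShift) & kMask);
  }
  static int32_t update(int32_t value, int decision) {
    uint32_t bits = static_cast<uint32_t>(value) & ~(kMask << kShift);
    bits |= (static_cast<uint32_t>(decision) & kMask) << kShift;
    return static_cast<int32_t>(bits);
  }
};

class Site {
 public:
  int decision() const {
    // Relaxed load: atomicity for concurrent readers, no ordering guarantees.
    return DecisionBits::decode(data_.load(std::memory_order_relaxed));
  }
  void set_decision(int decision) {
    // Relaxed load + relaxed store, matching the accessors above; writers are
    // assumed not to race with each other.
    int32_t value = data_.load(std::memory_order_relaxed);
    data_.store(DecisionBits::update(value, decision),
                std::memory_order_relaxed);
  }

 private:
  std::atomic<int32_t> data_{0};
};
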
diff --git a/deps/v8/src/objects/allocation-site.h b/deps/v8/src/objects/allocation-site.h
index a069279c6e..4d673b4caf 100644
--- a/deps/v8/src/objects/allocation-site.h
+++ b/deps/v8/src/objects/allocation-site.h
@@ -51,7 +51,7 @@ class AllocationSite : public Struct {
DECL_ACCESSORS(nested_site, Object)
// Bitfield containing pretenuring information.
- DECL_INT32_ACCESSORS(pretenure_data)
+ DECL_RELAXED_INT32_ACCESSORS(pretenure_data)
DECL_INT32_ACCESSORS(pretenure_create_count)
DECL_ACCESSORS(dependent_code, DependentCode)
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index 372fc745e4..55f51a7669 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -29,12 +29,10 @@ class JSArgumentsObject
// JSSloppyArgumentsObject is just a JSArgumentsObject with specific initial
// map. This initial map adds in-object properties for "length" and "callee".
-class JSSloppyArgumentsObject : public JSArgumentsObject {
+class JSSloppyArgumentsObject
+ : public TorqueGeneratedJSSloppyArgumentsObject<JSSloppyArgumentsObject,
+ JSArgumentsObject> {
public:
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSArgumentsObject::kHeaderSize,
- TORQUE_GENERATED_JS_SLOPPY_ARGUMENTS_OBJECT_FIELDS)
-
// Indices of in-object properties.
static const int kLengthIndex = 0;
static const int kCalleeIndex = kLengthIndex + 1;
@@ -45,13 +43,10 @@ class JSSloppyArgumentsObject : public JSArgumentsObject {
// JSStrictArgumentsObject is just a JSArgumentsObject with specific initial
// map. This initial map adds an in-object property for "length".
-class JSStrictArgumentsObject : public JSArgumentsObject {
+class JSStrictArgumentsObject
+ : public TorqueGeneratedJSStrictArgumentsObject<JSStrictArgumentsObject,
+ JSArgumentsObject> {
public:
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSArgumentsObject::kHeaderSize,
- TORQUE_GENERATED_JS_STRICT_ARGUMENTS_OBJECT_FIELDS)
-
// Indices of in-object properties.
static const int kLengthIndex = 0;
STATIC_ASSERT(kLengthIndex == JSSloppyArgumentsObject::kLengthIndex);
diff --git a/deps/v8/src/objects/arguments.tq b/deps/v8/src/objects/arguments.tq
index cc60e62f70..c522b1db76 100644
--- a/deps/v8/src/objects/arguments.tq
+++ b/deps/v8/src/objects/arguments.tq
@@ -14,14 +14,12 @@ macro IsJSArgumentsObjectWithLength(implicit context: Context)(o: Object):
}
// Just a starting shape for JSObject; properties can move after initialization.
-@doNotGenerateCppClass
extern shape JSSloppyArgumentsObject extends JSArgumentsObject {
length: JSAny;
callee: JSAny;
}
// Just a starting shape for JSObject; properties can move after initialization.
-@doNotGenerateCppClass
extern shape JSStrictArgumentsObject extends JSArgumentsObject {
length: JSAny;
}
@@ -50,7 +48,7 @@ extern shape JSStrictArgumentsObject extends JSArgumentsObject {
// arguments array is not a fixed array or if key >= elements.arguments.length.
//
// Otherwise, t = elements.mapped_entries[key]. If t is the hole, then the
-// entry has been deleted fron the arguments object, and value is looked up in
+// entry has been deleted from the arguments object, and value is looked up in
// the unmapped arguments array, as described above. Otherwise, t is a Smi
// index into the context array specified at elements.context, and the return
// value is elements.context[t].
diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc
index 7a59c2e715..836ad3e71d 100644
--- a/deps/v8/src/objects/backing-store.cc
+++ b/deps/v8/src/objects/backing-store.cc
@@ -9,6 +9,7 @@
#include "src/base/platform/wrappers.h"
#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
+#include "src/init/vm-cage.h"
#include "src/logging/counters.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -38,8 +39,8 @@ constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
#endif // V8_ENABLE_WEBASSEMBLY
-#if V8_TARGET_ARCH_MIPS64
-// MIPS64 has a user space of 2^40 bytes on most processors,
+#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64
+// MIPS64 and LOONG64 have a user space of 2^40 bytes on most processors,
// address space limits needs to be smaller.
constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB
#elif V8_TARGET_ARCH_RISCV64
@@ -152,6 +153,15 @@ BackingStore::~BackingStore() {
return;
}
+ PageAllocator* page_allocator = GetPlatformPageAllocator();
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (GetProcessWideVirtualMemoryCage()->Contains(buffer_start_)) {
+ page_allocator = GetPlatformDataCagePageAllocator();
+ } else {
+ DCHECK(kAllowBackingStoresOutsideDataCage);
+ }
+#endif
+
#if V8_ENABLE_WEBASSEMBLY
if (is_wasm_memory_) {
// TODO(v8:11111): RAB / GSAB - Wasm integration.
@@ -176,8 +186,8 @@ BackingStore::~BackingStore() {
bool pages_were_freed =
region.size() == 0 /* no need to free any pages */ ||
- FreePages(GetPlatformPageAllocator(),
- reinterpret_cast<void*>(region.begin()), region.size());
+ FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+ region.size());
CHECK(pages_were_freed);
BackingStore::ReleaseReservation(reservation_size);
Clear();
@@ -195,8 +205,8 @@ BackingStore::~BackingStore() {
bool pages_were_freed =
region.size() == 0 /* no need to free any pages */ ||
- FreePages(GetPlatformPageAllocator(),
- reinterpret_cast<void*>(region.begin()), region.size());
+ FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+ region.size());
CHECK(pages_were_freed);
BackingStore::ReleaseReservation(reservation_size);
Clear();
@@ -263,6 +273,8 @@ std::unique_ptr<BackingStore> BackingStore::Allocate(
counters->array_buffer_new_size_failures()->AddSample(mb_length);
return {};
}
+
+ DCHECK(IsValidBackingStorePointer(buffer_start));
}
auto result = new BackingStore(buffer_start, // start
@@ -400,10 +412,24 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
// 2. Allocate pages (inaccessible by default).
//--------------------------------------------------------------------------
void* allocation_base = nullptr;
+ PageAllocator* page_allocator = GetPlatformPageAllocator();
auto allocate_pages = [&] {
- allocation_base =
- AllocatePages(GetPlatformPageAllocator(), nullptr, reservation_size,
- page_size, PageAllocator::kNoAccess);
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ page_allocator = GetPlatformDataCagePageAllocator();
+ allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
+ page_size, PageAllocator::kNoAccess);
+ if (allocation_base) return true;
+ // We currently still allow falling back to the platform page allocator if
+ // the data cage page allocator fails. This will eventually be removed.
+ // TODO(chromium:1218005) once we forbid the fallback, we should have a
+ // single API, e.g. GetPlatformDataPageAllocator(), that returns the correct
+ // page allocator to use here depending on whether the virtual memory cage
+ // is enabled or not.
+ if (!kAllowBackingStoresOutsideDataCage) return false;
+ page_allocator = GetPlatformPageAllocator();
+#endif
+ allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
+ page_size, PageAllocator::kNoAccess);
return allocation_base != nullptr;
};
if (!gc_retry(allocate_pages)) {
@@ -414,6 +440,8 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
return {};
}
+ DCHECK(IsValidBackingStorePointer(allocation_base));
+
// Get a pointer to the start of the buffer, skipping negative guard region
// if necessary.
#if V8_ENABLE_WEBASSEMBLY
@@ -429,8 +457,8 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
size_t committed_byte_length = initial_pages * page_size;
auto commit_memory = [&] {
return committed_byte_length == 0 ||
- SetPermissions(GetPlatformPageAllocator(), buffer_start,
- committed_byte_length, PageAllocator::kReadWrite);
+ SetPermissions(page_allocator, buffer_start, committed_byte_length,
+ PageAllocator::kReadWrite);
};
if (!gc_retry(commit_memory)) {
TRACE_BS("BSw:try failed to set permissions (%p, %zu)\n", buffer_start,
@@ -708,6 +736,7 @@ BackingStore::ResizeOrGrowResult BackingStore::GrowInPlace(
std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
Isolate* isolate, void* allocation_base, size_t allocation_length,
SharedFlag shared, bool free_on_destruct) {
+ DCHECK(IsValidBackingStorePointer(allocation_base));
auto result = new BackingStore(allocation_base, // start
allocation_length, // length
allocation_length, // max length
@@ -729,6 +758,7 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
void* allocation_base, size_t allocation_length,
v8::BackingStore::DeleterCallback deleter, void* deleter_data,
SharedFlag shared) {
+ DCHECK(IsValidBackingStorePointer(allocation_base));
bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter);
auto result = new BackingStore(allocation_base, // start
allocation_length, // length
diff --git a/deps/v8/src/objects/backing-store.h b/deps/v8/src/objects/backing-store.h
index 013a97a526..6c709c2b96 100644
--- a/deps/v8/src/objects/backing-store.h
+++ b/deps/v8/src/objects/backing-store.h
@@ -7,8 +7,8 @@
#include <memory>
+#include "include/v8-array-buffer.h"
#include "include/v8-internal.h"
-#include "include/v8.h"
#include "src/base/optional.h"
#include "src/handles/handles.h"
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 5d21adfb89..3f1f12bcc2 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -1107,8 +1107,19 @@ MaybeHandle<BigInt> BigInt::FromObject(Isolate* isolate, Handle<Object> obj) {
if (isolate->has_pending_exception()) {
return MaybeHandle<BigInt>();
} else {
+ Handle<String> str = Handle<String>::cast(obj);
+ constexpr int kMaxRenderedLength = 1000;
+ if (str->length() > kMaxRenderedLength) {
+ Factory* factory = isolate->factory();
+ Handle<String> prefix =
+ factory->NewProperSubString(str, 0, kMaxRenderedLength);
+ Handle<SeqTwoByteString> ellipsis =
+ factory->NewRawTwoByteString(1).ToHandleChecked();
+ ellipsis->SeqTwoByteStringSet(0, 0x2026);
+ str = factory->NewConsString(prefix, ellipsis).ToHandleChecked();
+ }
THROW_NEW_ERROR(isolate,
- NewSyntaxError(MessageTemplate::kBigIntFromObject, obj),
+ NewSyntaxError(MessageTemplate::kBigIntFromObject, str),
BigInt);
}
}
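
The BigInt::FromObject hunk above caps the offending string at 1000 characters and appends U+2026 before embedding it in the SyntaxError message, so a pathologically long input cannot balloon the error text. Outside of V8's Factory/Handle machinery the same truncation reduces to a few lines of plain C++ (the 1000-character limit comes from the diff; the function name is made up):

// Illustrative sketch only; plain-string version of the truncation above.
#include <cstddef>
#include <string>

std::u16string TruncateForMessage(const std::u16string& str) {
  constexpr std::size_t kMaxRenderedLength = 1000;
  if (str.size() <= kMaxRenderedLength) return str;
  std::u16string result = str.substr(0, kMaxRenderedLength);
  result.push_back(u'\u2026');  // U+2026 HORIZONTAL ELLIPSIS
  return result;
}
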
diff --git a/deps/v8/src/objects/cell-inl.h b/deps/v8/src/objects/cell-inl.h
index 5c809b8172..d47b49504e 100644
--- a/deps/v8/src/objects/cell-inl.h
+++ b/deps/v8/src/objects/cell-inl.h
@@ -20,6 +20,10 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(Cell)
+DEF_RELAXED_GETTER(Cell, value, Object) {
+ return TaggedField<Object, kValueOffset>::Relaxed_Load(cage_base, *this);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/cell.h b/deps/v8/src/objects/cell.h
index 4076dea0e6..56c1016bd5 100644
--- a/deps/v8/src/objects/cell.h
+++ b/deps/v8/src/objects/cell.h
@@ -19,6 +19,9 @@ class Cell : public TorqueGeneratedCell<Cell, HeapObject> {
public:
inline Address ValueAddress() { return address() + kValueOffset; }
+ using TorqueGeneratedCell::value;
+ DECL_RELAXED_GETTER(value, Object)
+
using BodyDescriptor = FixedBodyDescriptor<kValueOffset, kSize, kSize>;
TQ_OBJECT_CONSTRUCTORS(Cell)
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index cae02edc23..48e5810f14 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -9,6 +9,7 @@
#include "src/baseline/bytecode-offset-iterator.h"
#include "src/codegen/code-desc.h"
#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -205,15 +206,26 @@ CODE_ACCESSORS_CHECKED(relocation_info_or_undefined, HeapObject,
kRelocationInfoOffset,
value.IsUndefined() || value.IsByteArray())
-CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-#define IS_BASELINE() (kind() == CodeKind::BASELINE)
+ACCESSORS_CHECKED2(Code, deoptimization_data, FixedArray,
+ kDeoptimizationDataOrInterpreterDataOffset,
+ kind() != CodeKind::BASELINE,
+ kind() != CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
+ACCESSORS_CHECKED2(Code, bytecode_or_interpreter_data, HeapObject,
+ kDeoptimizationDataOrInterpreterDataOffset,
+ kind() == CodeKind::BASELINE,
+ kind() == CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
+
ACCESSORS_CHECKED2(Code, source_position_table, ByteArray, kPositionTableOffset,
- !IS_BASELINE(),
- !IS_BASELINE() && !ObjectInYoungGeneration(value))
+ kind() != CodeKind::BASELINE,
+ kind() != CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
ACCESSORS_CHECKED2(Code, bytecode_offset_table, ByteArray, kPositionTableOffset,
- IS_BASELINE(),
- IS_BASELINE() && !ObjectInYoungGeneration(value))
-#undef IS_BASELINE
+ kind() == CodeKind::BASELINE,
+ kind() == CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
+
// Concurrent marker needs to access kind specific flags in code data container.
RELEASE_ACQUIRE_CODE_ACCESSORS(code_data_container, CodeDataContainer,
kCodeDataContainerOffset)
@@ -268,7 +280,8 @@ inline CodeDataContainer CodeDataContainerFromCodeT(CodeT code) {
void Code::WipeOutHeader() {
WRITE_FIELD(*this, kRelocationInfoOffset, Smi::FromInt(0));
- WRITE_FIELD(*this, kDeoptimizationDataOffset, Smi::FromInt(0));
+ WRITE_FIELD(*this, kDeoptimizationDataOrInterpreterDataOffset,
+ Smi::FromInt(0));
WRITE_FIELD(*this, kPositionTableOffset, Smi::FromInt(0));
WRITE_FIELD(*this, kCodeDataContainerOffset, Smi::FromInt(0));
}
@@ -553,44 +566,47 @@ inline bool Code::is_turbofanned() const {
inline bool Code::can_have_weak_objects() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return CanHaveWeakObjectsField::decode(flags);
}
inline void Code::set_can_have_weak_objects(bool value) {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = CanHaveWeakObjectsField::update(previous, value);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
inline bool Code::is_promise_rejection() const {
DCHECK(kind() == CodeKind::BUILTIN);
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return IsPromiseRejectionField::decode(flags);
}
inline void Code::set_is_promise_rejection(bool value) {
DCHECK(kind() == CodeKind::BUILTIN);
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = IsPromiseRejectionField::update(previous, value);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
inline bool Code::is_exception_caught() const {
DCHECK(kind() == CodeKind::BUILTIN);
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return IsExceptionCaughtField::decode(flags);
}
inline void Code::set_is_exception_caught(bool value) {
DCHECK(kind() == CodeKind::BUILTIN);
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = IsExceptionCaughtField::update(previous, value);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
inline bool Code::is_off_heap_trampoline() const {
@@ -642,7 +658,8 @@ int Code::stack_slots() const {
bool Code::marked_for_deoptimization() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return MarkedForDeoptimizationField::decode(flags);
}
@@ -650,14 +667,15 @@ void Code::set_marked_for_deoptimization(bool flag) {
DCHECK(CodeKindCanDeoptimize(kind()));
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = MarkedForDeoptimizationField::update(previous, flag);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
int Code::deoptimization_count() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
int count = DeoptCountField::decode(flags);
DCHECK_GE(count, 0);
return count;
@@ -666,17 +684,18 @@ int Code::deoptimization_count() const {
void Code::increment_deoptimization_count() {
DCHECK(CodeKindCanDeoptimize(kind()));
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t flags = container.kind_specific_flags();
+ int32_t flags = container.kind_specific_flags(kRelaxedLoad);
int32_t count = DeoptCountField::decode(flags);
DCHECK_GE(count, 0);
CHECK_LE(count + 1, DeoptCountField::kMax);
int32_t updated = DeoptCountField::update(flags, count + 1);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
bool Code::embedded_objects_cleared() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return EmbeddedObjectsClearedField::decode(flags);
}
@@ -684,14 +703,15 @@ void Code::set_embedded_objects_cleared(bool flag) {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
DCHECK_IMPLIES(flag, marked_for_deoptimization());
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = EmbeddedObjectsClearedField::update(previous, flag);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
bool Code::deopt_already_counted() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return DeoptAlreadyCountedField::decode(flags);
}
@@ -699,9 +719,9 @@ void Code::set_deopt_already_counted(bool flag) {
DCHECK(CodeKindCanDeoptimize(kind()));
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = DeoptAlreadyCountedField::update(previous, flag);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
bool Code::is_optimized_code() const {
@@ -800,8 +820,8 @@ bool Code::IsExecutable() {
// concurrent marker.
STATIC_ASSERT(FIELD_SIZE(CodeDataContainer::kKindSpecificFlagsOffset) ==
kInt32Size);
-IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(CodeDataContainer, kind_specific_flags,
- kKindSpecificFlagsOffset)
+RELAXED_INT32_ACCESSORS(CodeDataContainer, kind_specific_flags,
+ kKindSpecificFlagsOffset)
ACCESSORS_CHECKED(CodeDataContainer, raw_code, Object, kCodeOffset,
V8_EXTERNAL_CODE_SPACE_BOOL)
RELAXED_ACCESSORS_CHECKED(CodeDataContainer, raw_code, Object, kCodeOffset,
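
A side note on the kind_specific_flags changes above: every accessor pairs an acquire load of the CodeDataContainer with relaxed loads and stores of the flag word inside it. Sketched with std::atomic stand-ins rather than V8's accessor macros (all names here are hypothetical):

// Illustrative sketch only; acquire-publishes the container, then touches its
// flag word with relaxed atomics, as in the accessors above.
#include <atomic>
#include <cstdint>

struct FlagsContainer {
  std::atomic<int32_t> kind_specific_flags{0};
};

class CodeLike {
 public:
  explicit CodeLike(FlagsContainer* container) {
    // Release store pairs with the acquire loads below.
    container_.store(container, std::memory_order_release);
  }

  bool marked_for_deoptimization() const {
    FlagsContainer* c = container_.load(std::memory_order_acquire);
    int32_t flags = c->kind_specific_flags.load(std::memory_order_relaxed);
    return (flags & kMarkedBit) != 0;
  }

  void set_marked_for_deoptimization(bool value) {
    FlagsContainer* c = container_.load(std::memory_order_acquire);
    int32_t previous = c->kind_specific_flags.load(std::memory_order_relaxed);
    int32_t updated =
        value ? (previous | kMarkedBit) : (previous & ~kMarkedBit);
    c->kind_specific_flags.store(updated, std::memory_order_relaxed);
  }

 private:
  static constexpr int32_t kMarkedBit = 1 << 0;
  std::atomic<FlagsContainer*> container_{nullptr};
};
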
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index e2a4528d0d..b3f9953be1 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -333,7 +333,7 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32) || \
- defined(V8_TARGET_ARCH_RISCV64)
+ defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
for (RelocIterator it(*this, kModeMask); !it.done(); it.next()) {
// On these platforms we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. They are later
@@ -349,10 +349,10 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
}
return false;
}
+ return true;
#else
#error Unsupported architecture.
#endif
- return true;
}
bool Code::Inlines(SharedFunctionInfo sfi) {
@@ -775,7 +775,7 @@ void DependentCode::SetDependentCode(Handle<HeapObject> object,
void DependentCode::InstallDependency(Isolate* isolate, Handle<Code> code,
Handle<HeapObject> object,
DependencyGroup group) {
- if (V8_UNLIKELY(FLAG_trace_code_dependencies)) {
+ if (V8_UNLIKELY(FLAG_trace_compilation_dependencies)) {
StdoutStream{} << "Installing dependency of [" << code->GetHeapObject()
<< "] on [" << object << "] in group ["
<< DependencyGroupName(group) << "]\n";
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 2d6fc3e983..2b2c874d86 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -43,7 +43,7 @@ class CodeDataContainer : public HeapObject {
public:
NEVER_READ_ONLY_SPACE
DECL_ACCESSORS(next_code_link, Object)
- DECL_INT_ACCESSORS(kind_specific_flags)
+ DECL_RELAXED_INT32_ACCESSORS(kind_specific_flags)
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic.
@@ -279,8 +279,12 @@ class Code : public HeapObject {
// This function should be called only from GC.
void ClearEmbeddedObjects(Heap* heap);
- // [deoptimization_data]: Array containing data for deopt.
+ // [deoptimization_data]: Array containing data for deopt for non-baseline
+ // code.
DECL_ACCESSORS(deoptimization_data, FixedArray)
+ // [bytecode_or_interpreter_data]: BytecodeArray or InterpreterData for
+ // baseline code.
+ DECL_ACCESSORS(bytecode_or_interpreter_data, HeapObject)
// [source_position_table]: ByteArray for the source positions table for
// non-baseline code.
@@ -511,7 +515,7 @@ class Code : public HeapObject {
// Layout description.
#define CODE_FIELDS(V) \
V(kRelocationInfoOffset, kTaggedSize) \
- V(kDeoptimizationDataOffset, kTaggedSize) \
+ V(kDeoptimizationDataOrInterpreterDataOffset, kTaggedSize) \
V(kPositionTableOffset, kTaggedSize) \
V(kCodeDataContainerOffset, kTaggedSize) \
/* Data or code not directly visited by GC directly starts here. */ \
@@ -544,8 +548,10 @@ class Code : public HeapObject {
static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
#elif V8_TARGET_ARCH_MIPS64
static constexpr int kHeaderPaddingSize = 24;
+#elif V8_TARGET_ARCH_LOONG64
+ static constexpr int kHeaderPaddingSize = 24;
#elif V8_TARGET_ARCH_X64
- static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
+ static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 56;
#elif V8_TARGET_ARCH_ARM
static constexpr int kHeaderPaddingSize = 12;
#elif V8_TARGET_ARCH_IA32
@@ -647,6 +653,10 @@ class Code::OptimizedCodeIterator {
inline CodeT ToCodeT(Code code);
inline Code FromCodeT(CodeT code);
inline Code FromCodeT(CodeT code, RelaxedLoadTag);
+inline Code FromCodeT(CodeT code, AcquireLoadTag);
+inline Code FromCodeT(CodeT code, PtrComprCageBase);
+inline Code FromCodeT(CodeT code, PtrComprCageBase, RelaxedLoadTag);
+inline Code FromCodeT(CodeT code, PtrComprCageBase, AcquireLoadTag);
inline CodeDataContainer CodeDataContainerFromCodeT(CodeT code);
class AbstractCode : public HeapObject {
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index 7fae0c9e0d..81ed696cb0 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_CONTEXTS_H_
#define V8_OBJECTS_CONTEXTS_H_
+#include "include/v8-promise.h"
#include "src/objects/fixed-array.h"
#include "src/objects/function-kind.h"
#include "src/objects/ordered-hash-table.h"
@@ -43,13 +44,8 @@ enum ContextLookupFlags {
V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
V(ASYNC_MODULE_EVALUATE_INTERNAL, JSFunction, \
async_module_evaluate_internal) \
- V(OBJECT_CREATE, JSFunction, object_create) \
V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
- V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
- V(MATH_POW_INDEX, JSFunction, math_pow) \
- V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
- promise_internal_constructor) \
V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
V(FUNCTION_PROTOTYPE_APPLY_INDEX, JSFunction, function_prototype_apply)
diff --git a/deps/v8/src/objects/feedback-cell-inl.h b/deps/v8/src/objects/feedback-cell-inl.h
index 84257e544c..46d92b8447 100644
--- a/deps/v8/src/objects/feedback-cell-inl.h
+++ b/deps/v8/src/objects/feedback-cell-inl.h
@@ -40,7 +40,7 @@ void FeedbackCell::reset_feedback_vector(
CHECK(value().IsFeedbackVector());
ClosureFeedbackCellArray closure_feedback_cell_array =
FeedbackVector::cast(value()).closure_feedback_cell_array();
- set_value(closure_feedback_cell_array);
+ set_value(closure_feedback_cell_array, kReleaseStore);
if (gc_notify_updated_slot) {
(*gc_notify_updated_slot)(*this, RawField(FeedbackCell::kValueOffset),
closure_feedback_cell_array);
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index dbf0222b84..98315ad73d 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -84,7 +84,6 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
return get(isolate, index).IsTheHole(isolate);
}
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
void FixedArray::set(int index, Smi value) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
@@ -92,7 +91,6 @@ void FixedArray::set(int index, Smi value) {
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
}
-#endif
void FixedArray::set(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 6b0ac5d9ac..1dfd7dac13 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -134,18 +134,7 @@ class FixedArray
inline bool is_the_hole(Isolate* isolate, int index);
// Setter that doesn't need write barrier.
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
inline void set(int index, Smi value);
-#else
- inline void set(int index, Smi value) {
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- DCHECK(Object(value).IsSmi());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(*this, offset, value);
- }
-#endif
-
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index f7cdd28c05..de90f6baa1 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -139,6 +139,12 @@ enum InstanceType : uint16_t {
FIRST_TYPE = FIRST_HEAP_OBJECT_TYPE,
LAST_TYPE = LAST_HEAP_OBJECT_TYPE,
BIGINT_TYPE = BIG_INT_BASE_TYPE,
+
+#ifdef V8_EXTERNAL_CODE_SPACE
+ CODET_TYPE = CODE_DATA_CONTAINER_TYPE,
+#else
+ CODET_TYPE = CODE_TYPE,
+#endif
};
// This constant is defined outside of the InstanceType enum because the
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index ac43c319f5..99a7d62098 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -1666,6 +1666,145 @@ MaybeHandle<JSArray> Intl::GetCanonicalLocales(Isolate* isolate,
return CreateArrayFromList(isolate, maybe_ll.FromJust(), attr);
}
+namespace {
+
+MaybeHandle<JSArray> AvailableCollations(Isolate* isolate) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ icu::Collator::getKeywordValues("collation", status));
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+ return Intl::ToJSArray(isolate, "co", enumeration.get(),
+ Intl::RemoveCollation, true);
+}
+
+MaybeHandle<JSArray> VectorToJSArray(Isolate* isolate,
+ const std::vector<std::string>& array) {
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> fixed_array =
+ factory->NewFixedArray(static_cast<int32_t>(array.size()));
+ int32_t index = 0;
+ for (std::string item : array) {
+ Handle<String> str = factory->NewStringFromAsciiChecked(item.c_str());
+ fixed_array->set(index++, *str);
+ }
+ return factory->NewJSArrayWithElements(fixed_array);
+}
+
+MaybeHandle<JSArray> AvailableCurrencies(Isolate* isolate) {
+ UErrorCode status = U_ZERO_ERROR;
+ UEnumeration* ids =
+ ucurr_openISOCurrencies(UCURR_COMMON | UCURR_NON_DEPRECATED, &status);
+ const char* next = nullptr;
+ std::vector<std::string> array;
+ while (U_SUCCESS(status) &&
+ (next = uenum_next(ids, nullptr, &status)) != nullptr) {
+ array.push_back(next);
+ }
+ std::sort(array.begin(), array.end());
+ uenum_close(ids);
+ return VectorToJSArray(isolate, array);
+}
+
+MaybeHandle<JSArray> AvailableNumberingSystems(Isolate* isolate) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ icu::NumberingSystem::getAvailableNames(status));
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+ // Need to filter out isAlgorithmic
+ return Intl::ToJSArray(
+ isolate, "nu", enumeration.get(),
+ [](const char* value) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::NumberingSystem> numbering_system(
+ icu::NumberingSystem::createInstanceByName(value, status));
+ // Skip algorithmic ones since Chrome filters out the resource.
+ return U_FAILURE(status) || numbering_system->isAlgorithmic();
+ },
+ true);
+}
+
+MaybeHandle<JSArray> AvailableTimeZones(Isolate* isolate) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ icu::TimeZone::createTimeZoneIDEnumeration(
+ UCAL_ZONE_TYPE_CANONICAL_LOCATION, nullptr, nullptr, status));
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+ return Intl::ToJSArray(isolate, nullptr, enumeration.get(), nullptr, true);
+}
+
+MaybeHandle<JSArray> AvailableUnits(Isolate* isolate) {
+ Factory* factory = isolate->factory();
+ std::set<std::string> sanctioned(Intl::SanctionedSimpleUnits());
+ Handle<FixedArray> fixed_array =
+ factory->NewFixedArray(static_cast<int32_t>(sanctioned.size()));
+ int32_t index = 0;
+ for (std::string item : sanctioned) {
+ Handle<String> str = factory->NewStringFromAsciiChecked(item.c_str());
+ fixed_array->set(index++, *str);
+ }
+ return factory->NewJSArrayWithElements(fixed_array);
+}
+
+} // namespace
+
+// ecma-402 #sec-intl.supportedvaluesof
+MaybeHandle<JSArray> Intl::SupportedValuesOf(Isolate* isolate,
+ Handle<Object> key_obj) {
+ Factory* factory = isolate->factory();
+ // 1. Let key be ? ToString(key).
+ Handle<String> key_str;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, key_str,
+ Object::ToString(isolate, key_obj), JSArray);
+ // 2. If key is "calendar", then
+ if (factory->calendar_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableCalendars( ).
+ return Intl::AvailableCalendars(isolate);
+ }
+ // 3. Else if key is "collation", then
+ if (factory->collation_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableCollations( ).
+ return AvailableCollations(isolate);
+ }
+ // 4. Else if key is "currency", then
+ if (factory->currency_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableCurrencies( ).
+ return AvailableCurrencies(isolate);
+ }
+ // 5. Else if key is "numberingSystem", then
+ if (factory->numberingSystem_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableNumberingSystems( ).
+ return AvailableNumberingSystems(isolate);
+ }
+ // 6. Else if key is "timeZone", then
+ if (factory->timeZone_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableTimeZones( ).
+ return AvailableTimeZones(isolate);
+ }
+ // 7. Else if key is "unit", then
+ if (factory->unit_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableUnits( ).
+ return AvailableUnits(isolate);
+ }
+ // 8. Else,
+ // a. Throw a RangeError exception.
+ // 9. Return ! CreateArrayFromList( list ).
+
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalid,
+ factory->NewStringFromStaticChars("key"), key_str),
+ JSArray);
+}
+
// ECMA 402 Intl.*.supportedLocalesOf
MaybeHandle<JSObject> Intl::SupportedLocalesOf(
Isolate* isolate, const char* method,
@@ -2247,5 +2386,51 @@ MaybeHandle<JSReceiver> Intl::CoerceOptionsToObject(Isolate* isolate,
return Handle<JSReceiver>::cast(options);
}
+MaybeHandle<JSArray> Intl::ToJSArray(
+ Isolate* isolate, const char* unicode_key,
+ icu::StringEnumeration* enumeration,
+ const std::function<bool(const char*)>& removes, bool sort) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::vector<std::string> array;
+ for (const char* item = enumeration->next(nullptr, status);
+ U_SUCCESS(status) && item != nullptr;
+ item = enumeration->next(nullptr, status)) {
+ if (unicode_key != nullptr) {
+ item = uloc_toUnicodeLocaleType(unicode_key, item);
+ }
+ if (removes == nullptr || !(removes)(item)) {
+ array.push_back(item);
+ }
+ }
+
+ if (sort) {
+ std::sort(array.begin(), array.end());
+ }
+ return VectorToJSArray(isolate, array);
+}
+
+bool Intl::RemoveCollation(const char* collation) {
+ return strcmp("standard", collation) == 0 || strcmp("search", collation) == 0;
+}
+
+// See the list in ecma402 #sec-issanctionedsimpleunitidentifier
+std::set<std::string> Intl::SanctionedSimpleUnits() {
+ return std::set<std::string>({"acre", "bit", "byte",
+ "celsius", "centimeter", "day",
+ "degree", "fahrenheit", "fluid-ounce",
+ "foot", "gallon", "gigabit",
+ "gigabyte", "gram", "hectare",
+ "hour", "inch", "kilobit",
+ "kilobyte", "kilogram", "kilometer",
+ "liter", "megabit", "megabyte",
+ "meter", "mile", "mile-scandinavian",
+ "millimeter", "milliliter", "millisecond",
+ "minute", "month", "ounce",
+ "percent", "petabyte", "pound",
+ "second", "stone", "terabit",
+ "terabyte", "week", "yard",
+ "year"});
+}
+
} // namespace internal
} // namespace v8
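
The new Intl::ToJSArray helper above is, at its core, "enumerate, optionally remap via a Unicode key, drop entries the removes predicate rejects, then sort". Stripped of ICU enumerations and V8 handles, the shape is roughly the following (names and types are stand-ins, not the V8 signatures):

// Illustrative sketch only; plain-string version of the enumerate/remap/
// filter/sort shape used by Intl::ToJSArray above.
#include <algorithm>
#include <functional>
#include <string>
#include <vector>

std::vector<std::string> CollectValues(
    const std::vector<std::string>& raw,
    const std::function<std::string(const std::string&)>& remap,
    const std::function<bool(const std::string&)>& removes, bool sort) {
  std::vector<std::string> out;
  for (const std::string& item : raw) {
    std::string mapped = remap ? remap(item) : item;
    // Keep the entry unless a predicate was supplied and rejects it.
    if (!removes || !removes(mapped)) out.push_back(mapped);
  }
  if (sort) std::sort(out.begin(), out.end());
  return out;
}

A predicate that returns true for "standard" and "search" plays the role of Intl::RemoveCollation in the hunk above; passing an empty std::function corresponds to the nullptr removes case.
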
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index ec0eb93873..122ca4b746 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -27,6 +27,7 @@ namespace U_ICU_NAMESPACE {
class BreakIterator;
class Collator;
class FormattedValue;
+class StringEnumeration;
class UnicodeString;
} // namespace U_ICU_NAMESPACE
@@ -142,6 +143,10 @@ class Intl {
V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> GetCanonicalLocales(
Isolate* isolate, Handle<Object> locales);
+ // ecma-402 #sec-intl.supportedvaluesof
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> SupportedValuesOf(
+ Isolate* isolate, Handle<Object> key);
+
// For locale sensitive functions
V8_WARN_UNUSED_RESULT static MaybeHandle<String> StringLocaleConvertCase(
Isolate* isolate, Handle<String> s, bool is_upper,
@@ -338,6 +343,18 @@ class Intl {
// ecma402/#sec-coerceoptionstoobject
V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> CoerceOptionsToObject(
Isolate* isolate, Handle<Object> options, const char* service);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> ToJSArray(
+ Isolate* isolate, const char* unicode_key,
+ icu::StringEnumeration* enumeration,
+ const std::function<bool(const char*)>& removes, bool sort);
+
+ static bool RemoveCollation(const char* collation);
+
+ static std::set<std::string> SanctionedSimpleUnits();
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> AvailableCalendars(
+ Isolate* isolate);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 7ea8aeb3e5..e1de03dcf9 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -30,10 +30,6 @@ ACCESSORS(JSTypedArray, base_pointer, Object, kBasePointerOffset)
RELEASE_ACQUIRE_ACCESSORS(JSTypedArray, base_pointer, Object,
kBasePointerOffset)
-void JSArrayBuffer::AllocateExternalPointerEntries(Isolate* isolate) {
- InitExternalPointerField(kBackingStoreOffset, isolate);
-}
-
size_t JSArrayBuffer::byte_length() const {
return ReadField<size_t>(kByteLengthOffset);
}
@@ -43,26 +39,20 @@ void JSArrayBuffer::set_byte_length(size_t value) {
}
DEF_GETTER(JSArrayBuffer, backing_store, void*) {
- Isolate* isolate = GetIsolateForHeapSandbox(*this);
- Address value = ReadExternalPointerField(kBackingStoreOffset, isolate,
- kArrayBufferBackingStoreTag);
- return reinterpret_cast<void*>(value);
+ return reinterpret_cast<void*>(ReadField<Address>(kBackingStoreOffset));
}
void JSArrayBuffer::set_backing_store(Isolate* isolate, void* value) {
- WriteExternalPointerField(kBackingStoreOffset, isolate,
- reinterpret_cast<Address>(value),
- kArrayBufferBackingStoreTag);
+ DCHECK(IsValidBackingStorePointer(value));
+ WriteField<Address>(kBackingStoreOffset, reinterpret_cast<Address>(value));
}
uint32_t JSArrayBuffer::GetBackingStoreRefForDeserialization() const {
- return static_cast<uint32_t>(
- ReadField<ExternalPointer_t>(kBackingStoreOffset));
+ return static_cast<uint32_t>(ReadField<Address>(kBackingStoreOffset));
}
void JSArrayBuffer::SetBackingStoreRefForSerialization(uint32_t ref) {
- WriteField<ExternalPointer_t>(kBackingStoreOffset,
- static_cast<ExternalPointer_t>(ref));
+ WriteField<Address>(kBackingStoreOffset, static_cast<Address>(ref));
}
ArrayBufferExtension* JSArrayBuffer::extension() const {
@@ -238,10 +228,6 @@ size_t JSTypedArray::GetLength() const {
return GetLengthOrOutOfBounds(out_of_bounds);
}
-void JSTypedArray::AllocateExternalPointerEntries(Isolate* isolate) {
- InitExternalPointerField(kExternalPointerOffset, isolate);
-}
-
size_t JSTypedArray::length() const {
DCHECK(!is_length_tracking());
DCHECK(!is_backed_by_rab());
@@ -257,18 +243,16 @@ void JSTypedArray::set_length(size_t value) {
}
DEF_GETTER(JSTypedArray, external_pointer, Address) {
- Isolate* isolate = GetIsolateForHeapSandbox(*this);
- return ReadExternalPointerField(kExternalPointerOffset, isolate,
- kTypedArrayExternalPointerTag);
+ return ReadField<Address>(kExternalPointerOffset);
}
-DEF_GETTER(JSTypedArray, external_pointer_raw, ExternalPointer_t) {
- return ReadField<ExternalPointer_t>(kExternalPointerOffset);
+DEF_GETTER(JSTypedArray, external_pointer_raw, Address) {
+ return ReadField<Address>(kExternalPointerOffset);
}
void JSTypedArray::set_external_pointer(Isolate* isolate, Address value) {
- WriteExternalPointerField(kExternalPointerOffset, isolate, value,
- kTypedArrayExternalPointerTag);
+ DCHECK(IsValidBackingStorePointer(reinterpret_cast<void*>(value)));
+ WriteField<Address>(kExternalPointerOffset, value);
}
Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
@@ -282,14 +266,12 @@ Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
uint32_t JSTypedArray::GetExternalBackingStoreRefForDeserialization() const {
DCHECK(!is_on_heap());
- return static_cast<uint32_t>(
- ReadField<ExternalPointer_t>(kExternalPointerOffset));
+ return static_cast<uint32_t>(ReadField<Address>(kExternalPointerOffset));
}
void JSTypedArray::SetExternalBackingStoreRefForSerialization(uint32_t ref) {
DCHECK(!is_on_heap());
- WriteField<ExternalPointer_t>(kExternalPointerOffset,
- static_cast<ExternalPointer_t>(ref));
+ WriteField<Address>(kExternalPointerOffset, static_cast<Address>(ref));
}
void JSTypedArray::RemoveExternalPointerCompensationForSerialization(
@@ -390,19 +372,12 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
}
DEF_GETTER(JSDataView, data_pointer, void*) {
- Isolate* isolate = GetIsolateForHeapSandbox(*this);
- return reinterpret_cast<void*>(ReadExternalPointerField(
- kDataPointerOffset, isolate, kDataViewDataPointerTag));
-}
-
-void JSDataView::AllocateExternalPointerEntries(Isolate* isolate) {
- InitExternalPointerField(kDataPointerOffset, isolate);
+ return reinterpret_cast<void*>(ReadField<Address>(kDataPointerOffset));
}
void JSDataView::set_data_pointer(Isolate* isolate, void* value) {
- WriteExternalPointerField(kDataPointerOffset, isolate,
- reinterpret_cast<Address>(value),
- kDataViewDataPointerTag);
+ DCHECK(IsValidBackingStorePointer(value));
+ WriteField<Address>(kDataPointerOffset, reinterpret_cast<Address>(value));
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index 917a055b46..bbe635ee2a 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -55,7 +55,6 @@ void JSArrayBuffer::Setup(SharedFlag shared, ResizableFlag resizable,
SetEmbedderField(i, Smi::zero());
}
set_extension(nullptr);
- AllocateExternalPointerEntries(GetIsolate());
if (!backing_store) {
set_backing_store(GetIsolate(), nullptr);
set_byte_length(0);
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 1522f4b951..e5a68f3923 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_JS_ARRAY_BUFFER_H_
#define V8_OBJECTS_JS_ARRAY_BUFFER_H_
+#include "include/v8-typed-array.h"
#include "src/objects/backing-store.h"
#include "src/objects/js-objects.h"
#include "torque-generated/bit-fields.h"
@@ -32,12 +33,6 @@ class JSArrayBuffer
static constexpr size_t kMaxByteLength = kMaxSafeInteger;
#endif
- // When soft sandbox is enabled, creates entries in external pointer table for
- // all JSArrayBuffer's fields that require soft sandbox protection (backing
- // store pointer, backing store length, etc.).
- // When sandbox is not enabled, it's a no-op.
- inline void AllocateExternalPointerEntries(Isolate* isolate);
-
// [byte_length]: length in bytes
DECL_PRIMITIVE_ACCESSORS(byte_length, size_t)
@@ -283,12 +278,6 @@ class JSTypedArray
V8_EXPORT_PRIVATE Handle<JSArrayBuffer> GetBuffer();
- // When soft sandbox is enabled, creates entries in external pointer table for
- // all JSTypedArray's fields that require soft sandbox protection (external
- // pointer, offset, length, etc.).
- // When sandbox is not enabled, it's a no-op.
- inline void AllocateExternalPointerEntries(Isolate* isolate);
-
// The `DataPtr` is `base_ptr + external_pointer`, and `base_ptr` is nullptr
// for off-heap typed arrays.
static constexpr bool kOffHeapDataPtrEqualsExternalPointer = true;
@@ -392,12 +381,6 @@ class JSDataView
DECL_GETTER(data_pointer, void*)
inline void set_data_pointer(Isolate* isolate, void* value);
- // When soft sandbox is enabled, creates entries in external pointer table for
- // all JSDataView's fields that require soft sandbox protection (data pointer,
- // offset, length, etc.).
- // When sandbox is not enabled, it's a no-op.
- inline void AllocateExternalPointerEntries(Isolate* isolate);
-
// Dispatched behavior.
DECL_PRINTER(JSDataView)
DECL_VERIFIER(JSDataView)
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index ed7ab4e003..3b9f796263 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -15,11 +15,10 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSArray, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSArrayIterator, JSObject)
+#include "torque-generated/src/objects/js-array-tq-inl.inc"
-CAST_ACCESSOR(JSArray)
-CAST_ACCESSOR(JSArrayIterator)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSArray)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSArrayIterator)
DEF_GETTER(JSArray, length, Object) {
return TaggedField<Object, kLengthOffset>::load(cage_base, *this);
@@ -70,9 +69,6 @@ bool JSArray::HasArrayPrototype(Isolate* isolate) {
return map().prototype() == *isolate->initial_array_prototype();
}
-ACCESSORS(JSArrayIterator, iterated_object, Object, kIteratedObjectOffset)
-ACCESSORS(JSArrayIterator, next_index, Object, kNextIndexOffset)
-
SMI_ACCESSORS(JSArrayIterator, raw_kind, kKindOffset)
IterationKind JSArrayIterator::kind() const {
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 776cb4446b..4d725a0905 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -16,12 +16,14 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-array-tq.inc"
+
// The JSArray describes JavaScript Arrays
// Such an array can be in one of two modes:
// - fast, backing storage is a FixedArray and length <= elements.length();
// Please note: push and pop can be used to grow and shrink the array.
// - slow, backing storage is a HashTable with numbers as keys.
-class JSArray : public JSObject {
+class JSArray : public TorqueGeneratedJSArray<JSArray, JSObject> {
public:
// [length]: The length property.
DECL_ACCESSORS(length, Object)
@@ -109,8 +111,6 @@ class JSArray : public JSObject {
// to Proxies and objects with a hidden prototype.
inline bool HasArrayPrototype(Isolate* isolate);
- DECL_CAST(JSArray)
-
// Dispatched behavior.
DECL_PRINTER(JSArray)
DECL_VERIFIER(JSArray)
@@ -118,9 +118,6 @@ class JSArray : public JSObject {
// Number of element slots to pre-allocate for an empty array.
static const int kPreallocatedArrayElements = 4;
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_ARRAY_FIELDS)
-
static const int kLengthDescriptorIndex = 0;
// Max. number of elements being copied in Array builtins.
@@ -144,7 +141,7 @@ class JSArray : public JSObject {
AllocationMemento::kSize) >>
kDoubleSizeLog2;
- OBJECT_CONSTRUCTORS(JSArray, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSArray)
};
Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
@@ -153,52 +150,20 @@ Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
// The JSArrayIterator describes JavaScript Array Iterators Objects, as
// defined in ES section #sec-array-iterator-objects.
-class JSArrayIterator : public JSObject {
+class JSArrayIterator
+ : public TorqueGeneratedJSArrayIterator<JSArrayIterator, JSObject> {
public:
DECL_PRINTER(JSArrayIterator)
DECL_VERIFIER(JSArrayIterator)
- DECL_CAST(JSArrayIterator)
-
- // [iterated_object]: the [[IteratedObject]] inobject property.
- DECL_ACCESSORS(iterated_object, Object)
-
- // [next_index]: The [[ArrayIteratorNextIndex]] inobject property.
- // The next_index is always a positive integer, and it points to
- // the next index that is to be returned by this iterator. It's
- // possible range is fixed depending on the [[iterated_object]]:
- //
- // 1. For JSArray's the next_index is always in Unsigned32
- // range, and when the iterator reaches the end it's set
- // to kMaxUInt32 to indicate that this iterator should
- // never produce values anymore even if the "length"
- // property of the JSArray changes at some later point.
- // 2. For JSTypedArray's the next_index is always in
- // UnsignedSmall range, and when the iterator terminates
- // it's set to Smi::kMaxValue.
- // 3. For all other JSReceiver's it's always between 0 and
- // kMaxSafeInteger, and the latter value is used to mark
- // termination.
- //
- // It's important that for 1. and 2. the value fits into the
- // Unsigned32 range (UnsignedSmall is a subset of Unsigned32),
- // since we use this knowledge in the fast-path for the array
- // iterator next calls in TurboFan (in the JSCallReducer) to
- // keep the index in Word32 representation. This invariant is
- // checked in JSArrayIterator::JSArrayIteratorVerify().
- DECL_ACCESSORS(next_index, Object)
-
// [kind]: the [[ArrayIterationKind]] inobject property.
inline IterationKind kind() const;
inline void set_kind(IterationKind kind);
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_ARRAY_ITERATOR_FIELDS)
-
private:
DECL_INT_ACCESSORS(raw_kind)
- OBJECT_CONSTRUCTORS(JSArrayIterator, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSArrayIterator)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-array.tq b/deps/v8/src/objects/js-array.tq
index 3ccf37b150..e9f7d86c44 100644
--- a/deps/v8/src/objects/js-array.tq
+++ b/deps/v8/src/objects/js-array.tq
@@ -4,10 +4,34 @@
extern enum IterationKind extends uint31 { kKeys, kValues, kEntries }
-@doNotGenerateCppClass
extern class JSArrayIterator extends JSObject {
iterated_object: JSReceiver;
+
+ // [next_index]: The [[ArrayIteratorNextIndex]] inobject property.
+ // The next_index is always a positive integer, and it points to
+ // the next index that is to be returned by this iterator. Its
+ // possible range is fixed depending on the [[iterated_object]]:
+ //
+ // 1. For JSArray's the next_index is always in Unsigned32
+ // range, and when the iterator reaches the end it's set
+ // to kMaxUInt32 to indicate that this iterator should
+ // never produce values anymore even if the "length"
+ // property of the JSArray changes at some later point.
+ // 2. For JSTypedArray's the next_index is always in
+ // UnsignedSmall range, and when the iterator terminates
+ // it's set to Smi::kMaxValue.
+ // 3. For all other JSReceiver's it's always between 0 and
+ // kMaxSafeInteger, and the latter value is used to mark
+ // termination.
+ //
+ // It's important that for 1. and 2. the value fits into the
+ // Unsigned32 range (UnsignedSmall is a subset of Unsigned32),
+ // since we use this knowledge in the fast-path for the array
+ // iterator next calls in TurboFan (in the JSCallReducer) to
+ // keep the index in Word32 representation. This invariant is
+ // checked in JSArrayIterator::JSArrayIteratorVerify().
next_index: Number;
+
kind: SmiTagged<IterationKind>;
}
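
The comment block above pins the sentinel value that marks exhaustion for each kind of iterated object. A minimal standalone C++ sketch of that contract follows; the helper, its enum, and the Smi limit parameter are illustrative only and not part of this change:

#include <cstdint>

enum class IteratedKind { kJSArray, kJSTypedArray, kOtherReceiver };

// Returns true when next_index has reached the documented termination
// sentinel: kMaxUInt32 for JSArray, Smi::kMaxValue for JSTypedArray
// (passed in here because it is platform-dependent), and 2^53 - 1
// (kMaxSafeInteger) for any other JSReceiver.
bool IterationDone(IteratedKind kind, double next_index, double smi_max_value) {
  constexpr double kMaxUInt32 = 4294967295.0;
  constexpr double kMaxSafeInteger = 9007199254740991.0;
  switch (kind) {
    case IteratedKind::kJSArray:
      return next_index >= kMaxUInt32;
    case IteratedKind::kJSTypedArray:
      return next_index >= smi_max_value;
    case IteratedKind::kOtherReceiver:
      return next_index >= kMaxSafeInteger;
  }
  return false;
}
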
@@ -25,7 +49,6 @@ macro CreateArrayIterator(implicit context: NativeContext)(
};
}
-@doNotGenerateCppClass
extern class JSArray extends JSObject {
macro IsEmpty(): bool {
return this.length == 0;
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index 7e2ece76a9..868b0a3be2 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -2209,8 +2209,8 @@ template <typename T>
MaybeHandle<T> FormatRangeCommon(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
double y,
- MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&,
- bool*),
+ const std::function<MaybeHandle<T>(Isolate*, const icu::FormattedValue&,
+ bool*)>& formatToResult,
bool* outputRange) {
  // Track newer feature formatRange and formatRangeToParts
isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateTimeFormatRange);
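
The switch from a raw function pointer to a const std::function<...>& parameter lets FormatRangeCommon accept capturing lambdas as well as plain functions. A small self-contained sketch of the difference (names are illustrative, not the Intl helpers themselves):

#include <functional>
#include <string>

// A function-pointer parameter only accepts capture-free callables.
std::string ApplyPtr(std::string (*format)(int), int value) {
  return format(value);
}

// A std::function parameter also accepts lambdas that capture state,
// which is what a formatting helper needs when it closes over context.
std::string ApplyFn(const std::function<std::string(int)>& format, int value) {
  return format(value);
}

std::string Plain(int v) { return std::to_string(v); }

void Demo() {
  ApplyPtr(Plain, 1);  // fine
  ApplyFn(Plain, 1);   // still fine
  std::string prefix = "x=";
  ApplyFn([&](int v) { return prefix + std::to_string(v); }, 2);  // capturing
  // ApplyPtr with the capturing lambda above would not compile.
}
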
diff --git a/deps/v8/src/objects/js-function-inl.h b/deps/v8/src/objects/js-function-inl.h
index 275ffba14d..15634b8f02 100644
--- a/deps/v8/src/objects/js-function-inl.h
+++ b/deps/v8/src/objects/js-function-inl.h
@@ -27,9 +27,7 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(JSFunctionOrBoundFunction)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSBoundFunction)
-OBJECT_CONSTRUCTORS_IMPL(JSFunction, JSFunctionOrBoundFunction)
-
-CAST_ACCESSOR(JSFunction)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSFunction)
ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell, kFeedbackCellOffset)
RELEASE_ACQUIRE_ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell,
@@ -55,7 +53,7 @@ void JSFunction::ClearOptimizationMarker() {
}
bool JSFunction::ChecksOptimizationMarker() {
- return code(kAcquireLoad).checks_optimization_marker();
+ return code().checks_optimization_marker();
}
bool JSFunction::IsMarkedForOptimization() {
@@ -218,12 +216,6 @@ NativeContext JSFunction::native_context() {
return context().native_context();
}
-void JSFunction::set_context(HeapObject value, WriteBarrierMode mode) {
- DCHECK(value.IsUndefined() || value.IsContext());
- WRITE_FIELD(*this, kContextOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kContextOffset, value, mode);
-}
-
RELEASE_ACQUIRE_ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map,
HeapObject, kPrototypeOrInitialMapOffset,
map().has_prototype_slot())
@@ -332,7 +324,7 @@ bool JSFunction::NeedsResetDueToFlushedBytecode() {
}
bool JSFunction::NeedsResetDueToFlushedBaselineCode() {
- return code().kind() == CodeKind::BASELINE && !shared().HasBaselineData();
+ return code().kind() == CodeKind::BASELINE && !shared().HasBaselineCode();
}
void JSFunction::ResetIfCodeFlushed(
diff --git a/deps/v8/src/objects/js-function.cc b/deps/v8/src/objects/js-function.cc
index b2d086814f..3bcaf07387 100644
--- a/deps/v8/src/objects/js-function.cc
+++ b/deps/v8/src/objects/js-function.cc
@@ -19,19 +19,10 @@ namespace v8 {
namespace internal {
CodeKinds JSFunction::GetAttachedCodeKinds() const {
- // Note: There's a special case when bytecode has been aged away. After
- // flushing the bytecode, the JSFunction will still have the interpreter
- // entry trampoline attached, but the bytecode is no longer available.
- Code code = this->code(kAcquireLoad);
- if (code.is_interpreter_trampoline_builtin()) {
- return CodeKindFlag::INTERPRETED_FUNCTION;
- }
-
- const CodeKind kind = code.kind();
+ const CodeKind kind = code().kind();
if (!CodeKindIsJSFunction(kind)) return {};
-
- if (CodeKindIsOptimizedJSFunction(kind) && code.marked_for_deoptimization()) {
- // Nothing is attached.
+ if (CodeKindIsOptimizedJSFunction(kind) &&
+ code().marked_for_deoptimization()) {
return {};
}
return CodeKindToCodeKindFlag(kind);
@@ -49,7 +40,7 @@ CodeKinds JSFunction::GetAvailableCodeKinds() const {
if ((result & CodeKindFlag::BASELINE) == 0) {
// The SharedFunctionInfo could have attached baseline code.
- if (shared().HasBaselineData()) {
+ if (shared().HasBaselineCode()) {
result |= CodeKindFlag::BASELINE;
}
}
@@ -90,7 +81,8 @@ namespace {
// Returns false if no highest tier exists (i.e. the function is not compiled),
// otherwise returns true and sets highest_tier.
-bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) {
+V8_WARN_UNUSED_RESULT bool HighestTierOf(CodeKinds kinds,
+ CodeKind* highest_tier) {
DCHECK_EQ((kinds & ~kJSFunctionCodeKindsMask), 0);
if ((kinds & CodeKindFlag::TURBOFAN) != 0) {
*highest_tier = CodeKind::TURBOFAN;
@@ -111,33 +103,43 @@ bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) {
} // namespace
-bool JSFunction::ActiveTierIsIgnition() const {
- if (!shared().HasBytecodeArray()) return false;
- bool result = (GetActiveTier() == CodeKind::INTERPRETED_FUNCTION);
+base::Optional<CodeKind> JSFunction::GetActiveTier() const {
+#if V8_ENABLE_WEBASSEMBLY
+ // Asm/Wasm functions are currently not supported. For simplicity, this
+ // includes invalid asm.js functions whose code hasn't yet been updated to
+ // CompileLazy but is still the InstantiateAsmJs builtin.
+ if (shared().HasAsmWasmData() ||
+ code().builtin_id() == Builtin::kInstantiateAsmJs) {
+ return {};
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+
+ CodeKind highest_tier;
+ if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return {};
+
#ifdef DEBUG
- Code code = this->code(kAcquireLoad);
- DCHECK_IMPLIES(result, code.is_interpreter_trampoline_builtin() ||
- (CodeKindIsOptimizedJSFunction(code.kind()) &&
- code.marked_for_deoptimization()) ||
- (code.builtin_id() == Builtin::kCompileLazy &&
- shared().IsInterpreted()));
+ CHECK(highest_tier == CodeKind::TURBOFAN ||
+ highest_tier == CodeKind::BASELINE ||
+ highest_tier == CodeKind::TURBOPROP ||
+ highest_tier == CodeKind::INTERPRETED_FUNCTION);
+
+ if (highest_tier == CodeKind::INTERPRETED_FUNCTION) {
+ CHECK(code().is_interpreter_trampoline_builtin() ||
+ (CodeKindIsOptimizedJSFunction(code().kind()) &&
+ code().marked_for_deoptimization()) ||
+ (code().builtin_id() == Builtin::kCompileLazy &&
+ shared().IsInterpreted()));
+ }
#endif // DEBUG
- return result;
-}
-CodeKind JSFunction::GetActiveTier() const {
- CodeKind highest_tier;
- DCHECK(shared().is_compiled());
- HighestTierOf(GetAvailableCodeKinds(), &highest_tier);
- DCHECK(highest_tier == CodeKind::TURBOFAN ||
- highest_tier == CodeKind::BASELINE ||
- highest_tier == CodeKind::TURBOPROP ||
- highest_tier == CodeKind::INTERPRETED_FUNCTION);
return highest_tier;
}
+bool JSFunction::ActiveTierIsIgnition() const {
+ return GetActiveTier() == CodeKind::INTERPRETED_FUNCTION;
+}
+
bool JSFunction::ActiveTierIsTurbofan() const {
- if (!shared().HasBytecodeArray()) return false;
return GetActiveTier() == CodeKind::TURBOFAN;
}
@@ -145,27 +147,20 @@ bool JSFunction::ActiveTierIsBaseline() const {
return GetActiveTier() == CodeKind::BASELINE;
}
-bool JSFunction::ActiveTierIsIgnitionOrBaseline() const {
- return ActiveTierIsIgnition() || ActiveTierIsBaseline();
-}
-
bool JSFunction::ActiveTierIsToptierTurboprop() const {
- if (!FLAG_turboprop_as_toptier) return false;
- if (!shared().HasBytecodeArray()) return false;
- return GetActiveTier() == CodeKind::TURBOPROP && FLAG_turboprop_as_toptier;
+ return FLAG_turboprop_as_toptier && GetActiveTier() == CodeKind::TURBOPROP;
}
bool JSFunction::ActiveTierIsMidtierTurboprop() const {
- if (!FLAG_turboprop) return false;
- if (!shared().HasBytecodeArray()) return false;
- return GetActiveTier() == CodeKind::TURBOPROP && !FLAG_turboprop_as_toptier;
+ return FLAG_turboprop && !FLAG_turboprop_as_toptier &&
+ GetActiveTier() == CodeKind::TURBOPROP;
}
CodeKind JSFunction::NextTier() const {
if (V8_UNLIKELY(FLAG_turboprop) && ActiveTierIsMidtierTurboprop()) {
return CodeKind::TURBOFAN;
} else if (V8_UNLIKELY(FLAG_turboprop)) {
- DCHECK(ActiveTierIsIgnitionOrBaseline());
+ DCHECK(ActiveTierIsIgnition() || ActiveTierIsBaseline());
return CodeKind::TURBOPROP;
}
return CodeKind::TURBOFAN;
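
GetActiveTier now reports "no active tier" through an empty base::Optional instead of relying on a DCHECK that the function is compiled. A hedged caller-side sketch of the new shape, written against std::optional because it behaves the same way for this comparison:

#include <optional>

enum class CodeKind { INTERPRETED_FUNCTION, BASELINE, TURBOPROP, TURBOFAN };

// Stand-in for JSFunction::GetActiveTier(): empty when the function has no
// compiled code (or is an asm/wasm function), per the comment above.
std::optional<CodeKind> GetActiveTier() { return std::nullopt; }

bool ActiveTierIsTurbofan() {
  // Comparing an optional against a value is simply false when the optional
  // is empty, which is why the explicit HasBytecodeArray() pre-checks could
  // be dropped from the ActiveTierIs* predicates.
  return GetActiveTier() == CodeKind::TURBOFAN;
}
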
diff --git a/deps/v8/src/objects/js-function.h b/deps/v8/src/objects/js-function.h
index 6d7b21abe9..b7df4daf8b 100644
--- a/deps/v8/src/objects/js-function.h
+++ b/deps/v8/src/objects/js-function.h
@@ -53,7 +53,8 @@ class JSBoundFunction
};
// JSFunction describes JavaScript functions.
-class JSFunction : public JSFunctionOrBoundFunction {
+class JSFunction
+ : public TorqueGeneratedJSFunction<JSFunction, JSFunctionOrBoundFunction> {
public:
// [prototype_or_initial_map]:
DECL_RELEASE_ACQUIRE_ACCESSORS(prototype_or_initial_map, HeapObject)
@@ -70,8 +71,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
inline Context context();
DECL_RELAXED_GETTER(context, Context)
inline bool has_context() const;
- inline void set_context(HeapObject context,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline JSGlobalProxy global_proxy();
inline NativeContext native_context();
inline int length();
@@ -106,7 +105,8 @@ class JSFunction : public JSFunctionOrBoundFunction {
// indirect means such as the feedback vector's optimized code cache.
// - Active: the single code kind that would be executed if this function
// were called in its current state. Note that there may not be an active
- // code kind if the function is not compiled.
+ // code kind if the function is not compiled. Also, asm/wasm functions are
+ // currently not supported.
//
// Note: code objects that are marked_for_deoptimization are not part of the
// attached/available/active sets. This is because the JSFunction might have
@@ -120,11 +120,10 @@ class JSFunction : public JSFunctionOrBoundFunction {
bool HasAttachedCodeKind(CodeKind kind) const;
bool HasAvailableCodeKind(CodeKind kind) const;
- CodeKind GetActiveTier() const;
+ base::Optional<CodeKind> GetActiveTier() const;
V8_EXPORT_PRIVATE bool ActiveTierIsIgnition() const;
bool ActiveTierIsTurbofan() const;
bool ActiveTierIsBaseline() const;
- bool ActiveTierIsIgnitionOrBaseline() const;
bool ActiveTierIsMidtierTurboprop() const;
bool ActiveTierIsToptierTurboprop() const;
@@ -275,8 +274,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
// Prints the name of the function using PrintF.
void PrintName(FILE* out = stdout);
- DECL_CAST(JSFunction)
-
// Calculate the instance size and in-object properties count.
// {CalculateExpectedNofProperties} can trigger compilation.
static V8_WARN_UNUSED_RESULT int CalculateExpectedNofProperties(
@@ -310,18 +307,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
// ES6 section 19.2.3.5 Function.prototype.toString ( ).
static Handle<String> ToString(Handle<JSFunction> function);
- struct FieldOffsets {
- DEFINE_FIELD_OFFSET_CONSTANTS(JSFunctionOrBoundFunction::kHeaderSize,
- TORQUE_GENERATED_JS_FUNCTION_FIELDS)
- };
- static constexpr int kSharedFunctionInfoOffset =
- FieldOffsets::kSharedFunctionInfoOffset;
- static constexpr int kContextOffset = FieldOffsets::kContextOffset;
- static constexpr int kFeedbackCellOffset = FieldOffsets::kFeedbackCellOffset;
- static constexpr int kCodeOffset = FieldOffsets::kCodeOffset;
- static constexpr int kPrototypeOrInitialMapOffset =
- FieldOffsets::kPrototypeOrInitialMapOffset;
-
class BodyDescriptor;
private:
@@ -329,9 +314,15 @@ class JSFunction : public JSFunctionOrBoundFunction {
DECL_RELEASE_ACQUIRE_ACCESSORS(raw_code, CodeT)
// JSFunction doesn't have a fixed header size:
- // Hide JSFunctionOrBoundFunction::kHeaderSize to avoid confusion.
+ // Hide TorqueGeneratedClass::kHeaderSize to avoid confusion.
static const int kHeaderSize;
+ // Hide generated accessors; custom accessors are called "shared".
+ DECL_ACCESSORS(shared_function_info, SharedFunctionInfo)
+
+ // Hide generated accessors; custom accessors are called "raw_feedback_cell".
+ DECL_ACCESSORS(feedback_cell, FeedbackCell)
+
// Returns the set of code kinds of compilation artifacts (bytecode,
// generated code) attached to this JSFunction.
// Note that attached code objects that are marked_for_deoptimization are not
@@ -348,9 +339,9 @@ class JSFunction : public JSFunctionOrBoundFunction {
public:
static constexpr int kSizeWithoutPrototype = kPrototypeOrInitialMapOffset;
- static constexpr int kSizeWithPrototype = FieldOffsets::kHeaderSize;
+ static constexpr int kSizeWithPrototype = TorqueGeneratedClass::kHeaderSize;
- OBJECT_CONSTRUCTORS(JSFunction, JSFunctionOrBoundFunction);
+ TQ_OBJECT_CONSTRUCTORS(JSFunction)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-function.tq b/deps/v8/src/objects/js-function.tq
index de934b82f4..8932ea4395 100644
--- a/deps/v8/src/objects/js-function.tq
+++ b/deps/v8/src/objects/js-function.tq
@@ -18,7 +18,6 @@ extern class JSBoundFunction extends JSFunctionOrBoundFunction {
}
@highestInstanceTypeWithinParentClassRange
-@doNotGenerateCppClass
extern class JSFunction extends JSFunctionOrBoundFunction {
shared_function_info: SharedFunctionInfo;
context: Context;
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index 9ff9c82d12..ae9e7302bf 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -220,7 +220,8 @@ Maybe<std::vector<icu::UnicodeString>> ToUnicodeStringArray(
template <typename T>
MaybeHandle<T> FormatListCommon(
Isolate* isolate, Handle<JSListFormat> format, Handle<JSArray> list,
- MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&)) {
+ const std::function<MaybeHandle<T>(Isolate*, const icu::FormattedValue&)>&
+ formatToResult) {
DCHECK(!list->IsUndefined());
Maybe<std::vector<icu::UnicodeString>> maybe_array =
ToUnicodeStringArray(isolate, list);
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 64644abad2..51cf1453f4 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -177,19 +177,26 @@ int32_t weekdayFromEDaysOfWeek(icu::Calendar::EDaysOfWeek eDaysOfWeek) {
} // namespace
-bool JSLocale::Is38AlphaNumList(const std::string& value) {
- std::size_t found_dash = value.find("-");
- std::size_t found_underscore = value.find("_");
- if (found_dash == std::string::npos &&
- found_underscore == std::string::npos) {
- return IsAlphanum(value, 3, 8);
- }
- if (found_underscore == std::string::npos || found_dash < found_underscore) {
- return IsAlphanum(value.substr(0, found_dash), 3, 8) &&
- JSLocale::Is38AlphaNumList(value.substr(found_dash + 1));
+// Implemented as iteration instead of recursion to avoid stack overflow for
+// very long input strings.
+bool JSLocale::Is38AlphaNumList(const std::string& in) {
+ std::string value = in;
+ while (true) {
+ std::size_t found_dash = value.find("-");
+ std::size_t found_underscore = value.find("_");
+ if (found_dash == std::string::npos &&
+ found_underscore == std::string::npos) {
+ return IsAlphanum(value, 3, 8);
+ }
+ if (found_underscore == std::string::npos ||
+ found_dash < found_underscore) {
+ if (!IsAlphanum(value.substr(0, found_dash), 3, 8)) return false;
+ value = value.substr(found_dash + 1);
+ } else {
+ if (!IsAlphanum(value.substr(0, found_underscore), 3, 8)) return false;
+ value = value.substr(found_underscore + 1);
+ }
}
- return IsAlphanum(value.substr(0, found_underscore), 3, 8) &&
- JSLocale::Is38AlphaNumList(value.substr(found_underscore + 1));
}
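
The rewrite above replaces tail recursion over the remaining suffix with a loop, so arbitrarily long '-'/'_'-separated lists no longer grow the C++ stack. A standalone sketch of the same shape, with a simplified alphanumeric check standing in for the real IsAlphanum:

#include <cctype>
#include <string>

// Simplified stand-in for the real IsAlphanum(value, 3, 8).
bool IsAlphanum3To8(const std::string& s) {
  if (s.size() < 3 || s.size() > 8) return false;
  for (char c : s) {
    if (!std::isalnum(static_cast<unsigned char>(c))) return false;
  }
  return true;
}

// Iterative check that every '-' or '_' separated segment is 3-8
// alphanumeric characters, mirroring the loop structure above.
bool Is38AlphaNumList(std::string value) {
  while (true) {
    std::size_t sep = value.find_first_of("-_");
    if (sep == std::string::npos) return IsAlphanum3To8(value);
    if (!IsAlphanum3To8(value.substr(0, sep))) return false;
    value = value.substr(sep + 1);
  }
}
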
bool JSLocale::Is3Alpha(const std::string& value) {
@@ -476,57 +483,13 @@ MaybeHandle<JSLocale> JSLocale::Minimize(Isolate* isolate,
return Construct(isolate, result);
}
-MaybeHandle<JSArray> ToJSArray(Isolate* isolate, const char* unicode_key,
- icu::StringEnumeration* enumeration,
- const std::set<std::string>& removes) {
- UErrorCode status = U_ZERO_ERROR;
- Factory* factory = isolate->factory();
-
- int32_t count = 0;
- if (!removes.empty()) {
- // If we may remove items, then we need to go one pass first to count how
- // many items we will insert before we allocate the fixed array.
- for (const char* item = enumeration->next(nullptr, status);
- U_SUCCESS(status) && item != nullptr;
- item = enumeration->next(nullptr, status)) {
- if (unicode_key != nullptr) {
- item = uloc_toUnicodeLocaleType(unicode_key, item);
- }
- if (removes.find(item) == removes.end()) {
- count++;
- }
- }
- enumeration->reset(status);
- } else {
- count = enumeration->count(status);
- }
- Handle<FixedArray> fixed_array = factory->NewFixedArray(count);
-
- int32_t index = 0;
- for (const char* item = enumeration->next(nullptr, status);
- U_SUCCESS(status) && item != nullptr;
- item = enumeration->next(nullptr, status)) {
- if (unicode_key != nullptr) {
- item = uloc_toUnicodeLocaleType(unicode_key, item);
- }
- if (removes.find(item) != removes.end()) {
- continue;
- }
- Handle<String> str = factory->NewStringFromAsciiChecked(item);
- fixed_array->set(index++, *str);
- }
- CHECK(index == count);
- if (U_FAILURE(status)) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
- JSArray);
- }
- return factory->NewJSArrayWithElements(fixed_array);
-}
-
template <typename T>
-MaybeHandle<JSArray> GetKeywordValuesFromLocale(
- Isolate* isolate, const char* key, const char* unicode_key,
- const icu::Locale& locale, const std::set<std::string>& removes) {
+MaybeHandle<JSArray> GetKeywordValuesFromLocale(Isolate* isolate,
+ const char* key,
+ const char* unicode_key,
+ const icu::Locale& locale,
+ bool (*removes)(const char*),
+ bool commonly_used, bool sort) {
Factory* factory = isolate->factory();
UErrorCode status = U_ZERO_ERROR;
std::string ext =
@@ -539,27 +502,43 @@ MaybeHandle<JSArray> GetKeywordValuesFromLocale(
}
status = U_ZERO_ERROR;
std::unique_ptr<icu::StringEnumeration> enumeration(
- T::getKeywordValuesForLocale(key, locale, true, status));
+ T::getKeywordValuesForLocale(key, locale, commonly_used, status));
if (U_FAILURE(status)) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
JSArray);
}
- return ToJSArray(isolate, unicode_key, enumeration.get(), removes);
+ return Intl::ToJSArray(isolate, unicode_key, enumeration.get(), removes,
+ sort);
}
+namespace {
+
+MaybeHandle<JSArray> CalendarsForLocale(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ bool commonly_used, bool sort) {
+ return GetKeywordValuesFromLocale<icu::Calendar>(
+ isolate, "calendar", "ca", icu_locale, nullptr, commonly_used, sort);
+}
+
+} // namespace
+
MaybeHandle<JSArray> JSLocale::Calendars(Isolate* isolate,
Handle<JSLocale> locale) {
icu::Locale icu_locale(*(locale->icu_locale().raw()));
- return GetKeywordValuesFromLocale<icu::Calendar>(
- isolate, "calendar", "ca", icu_locale, std::set<std::string>());
+ return CalendarsForLocale(isolate, icu_locale, true, false);
+}
+
+MaybeHandle<JSArray> Intl::AvailableCalendars(Isolate* isolate) {
+ icu::Locale icu_locale("und");
+ return CalendarsForLocale(isolate, icu_locale, false, true);
}
MaybeHandle<JSArray> JSLocale::Collations(Isolate* isolate,
Handle<JSLocale> locale) {
icu::Locale icu_locale(*(locale->icu_locale().raw()));
- const std::set<std::string> removes({"standard", "search"});
- return GetKeywordValuesFromLocale<icu::Collator>(isolate, "collations", "co",
- icu_locale, removes);
+ return GetKeywordValuesFromLocale<icu::Collator>(
+ isolate, "collations", "co", icu_locale, Intl::RemoveCollation, true,
+ false);
}
MaybeHandle<JSArray> JSLocale::HourCycles(Isolate* isolate,
@@ -688,8 +667,7 @@ MaybeHandle<Object> JSLocale::TimeZones(Isolate* isolate,
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
JSArray);
}
- return ToJSArray(isolate, nullptr, enumeration.get(),
- std::set<std::string>());
+ return Intl::ToJSArray(isolate, nullptr, enumeration.get(), nullptr, true);
}
MaybeHandle<JSObject> JSLocale::TextInfo(Isolate* isolate,
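
Across this file the std::set of values to drop is replaced by a nullable predicate forwarded to Intl::ToJSArray, plus explicit commonly_used/sort flags. A minimal sketch of the predicate-based filtering (the local RemoveCollation here is a stand-in that mirrors the removed {"standard", "search"} set, not V8's Intl::RemoveCollation):

#include <string>
#include <vector>

bool RemoveCollation(const char* value) {
  return std::string(value) == "standard" || std::string(value) == "search";
}

// Keeps every item the predicate does not reject; nullptr means "keep all",
// as the TimeZones call site above does.
std::vector<std::string> Filter(const std::vector<std::string>& items,
                                bool (*removes)(const char*)) {
  std::vector<std::string> out;
  for (const std::string& item : items) {
    if (removes != nullptr && removes(item.c_str())) continue;
    out.push_back(item);
  }
  return out;
}
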
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index cc5b77a005..cf093f7fa5 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -173,27 +173,11 @@ std::map<const std::string, icu::MeasureUnit> CreateUnitMap() {
int32_t total = icu::MeasureUnit::getAvailable(nullptr, 0, status);
CHECK(U_FAILURE(status));
status = U_ZERO_ERROR;
- // See the list in ecma402 #sec-issanctionedsimpleunitidentifier
- std::set<std::string> sanctioned(
- {"acre", "bit", "byte",
- "celsius", "centimeter", "day",
- "degree", "fahrenheit", "fluid-ounce",
- "foot", "gallon", "gigabit",
- "gigabyte", "gram", "hectare",
- "hour", "inch", "kilobit",
- "kilobyte", "kilogram", "kilometer",
- "liter", "megabit", "megabyte",
- "meter", "mile", "mile-scandinavian",
- "millimeter", "milliliter", "millisecond",
- "minute", "month", "ounce",
- "percent", "petabyte", "pound",
- "second", "stone", "terabit",
- "terabyte", "week", "yard",
- "year"});
std::vector<icu::MeasureUnit> units(total);
total = icu::MeasureUnit::getAvailable(units.data(), total, status);
CHECK(U_SUCCESS(status));
std::map<const std::string, icu::MeasureUnit> map;
+ std::set<std::string> sanctioned(Intl::SanctionedSimpleUnits());
for (auto it = units.begin(); it != units.end(); ++it) {
// Need to skip none/percent
if (sanctioned.count(it->getSubtype()) > 0 &&
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 6be8267a55..c35999592a 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -31,25 +31,22 @@ namespace internal {
#include "torque-generated/src/objects/js-objects-tq-inl.inc"
-OBJECT_CONSTRUCTORS_IMPL(JSReceiver, HeapObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSReceiver)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSCustomElementsObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSSpecialObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncFromSyncIterator)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSDate)
-OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject, JSSpecialObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSGlobalProxy)
JSIteratorResult::JSIteratorResult(Address ptr) : JSObject(ptr) {}
-OBJECT_CONSTRUCTORS_IMPL(JSMessageObject, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSMessageObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSPrimitiveWrapper)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSStringIterator)
NEVER_READ_ONLY_SPACE_IMPL(JSReceiver)
-CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSIteratorResult)
-CAST_ACCESSOR(JSMessageObject)
-CAST_ACCESSOR(JSReceiver)
DEF_GETTER(JSObject, elements, FixedArrayBase) {
return TaggedField<FixedArrayBase, kElementsOffset>::load(cage_base, *this);
@@ -472,9 +469,6 @@ void JSObject::InitializeBody(Map map, int start_offset,
}
}
-ACCESSORS(JSGlobalObject, native_context, NativeContext, kNativeContextOffset)
-ACCESSORS(JSGlobalObject, global_proxy, JSGlobalProxy, kGlobalProxyOffset)
-
DEF_GETTER(JSGlobalObject, native_context_unchecked, Object) {
return TaggedField<Object, kNativeContextOffset>::load(cage_base, *this);
}
@@ -501,9 +495,6 @@ void JSMessageObject::set_type(MessageTemplate value) {
set_raw_type(static_cast<int>(value));
}
-ACCESSORS(JSMessageObject, argument, Object, kArgumentsOffset)
-ACCESSORS(JSMessageObject, script, Script, kScriptOffset)
-ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
ACCESSORS(JSMessageObject, shared_info, HeapObject, kSharedInfoOffset)
ACCESSORS(JSMessageObject, bytecode_offset, Smi, kBytecodeOffsetOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index 7452237006..d20cdaceb4 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -34,7 +34,7 @@ class IsCompiledScope;
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
-class JSReceiver : public HeapObject {
+class JSReceiver : public TorqueGeneratedJSReceiver<JSReceiver, HeapObject> {
public:
NEVER_READ_ONLY_SPACE
// Returns true if there is no slow (ie, dictionary) backing store.
@@ -85,9 +85,6 @@ class JSReceiver : public HeapObject {
static void DeleteNormalizedProperty(Handle<JSReceiver> object,
InternalIndex entry);
- DECL_CAST(JSReceiver)
- DECL_VERIFIER(JSReceiver)
-
// ES6 section 7.1.1 ToPrimitive
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ToPrimitive(
Handle<JSReceiver> receiver,
@@ -288,14 +285,17 @@ class JSReceiver : public HeapObject {
static const int kHashMask = PropertyArray::HashField::kMask;
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_JS_RECEIVER_FIELDS)
bool HasProxyInPrototype(Isolate* isolate);
// TC39 "Dynamic Code Brand Checks"
bool IsCodeLike(Isolate* isolate) const;
- OBJECT_CONSTRUCTORS(JSReceiver, HeapObject);
+ private:
+ // Hide generated accessors; custom accessors are called
+ // "raw_properties_or_hash".
+ DECL_ACCESSORS(properties_or_hash, Object)
+
+ TQ_OBJECT_CONSTRUCTORS(JSReceiver)
};
// The JSObject describes real heap allocated JavaScript objects with
@@ -996,21 +996,14 @@ class JSGlobalProxy
};
// JavaScript global object.
-class JSGlobalObject : public JSSpecialObject {
+class JSGlobalObject
+ : public TorqueGeneratedJSGlobalObject<JSGlobalObject, JSSpecialObject> {
public:
- // [native context]: the natives corresponding to this global object.
- DECL_ACCESSORS(native_context, NativeContext)
-
- // [global proxy]: the global proxy object of the context
- DECL_ACCESSORS(global_proxy, JSGlobalProxy)
-
DECL_RELEASE_ACQUIRE_ACCESSORS(global_dictionary, GlobalDictionary)
static void InvalidatePropertyCell(Handle<JSGlobalObject> object,
Handle<Name> name);
- DECL_CAST(JSGlobalObject)
-
inline bool IsDetached();
// May be called by the concurrent GC when the global object is not
@@ -1021,11 +1014,7 @@ class JSGlobalObject : public JSSpecialObject {
DECL_PRINTER(JSGlobalObject)
DECL_VERIFIER(JSGlobalObject)
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSSpecialObject::kHeaderSize,
- TORQUE_GENERATED_JS_GLOBAL_OBJECT_FIELDS)
-
- OBJECT_CONSTRUCTORS(JSGlobalObject, JSSpecialObject);
+ TQ_OBJECT_CONSTRUCTORS(JSGlobalObject)
};
// Representation for JS Wrapper objects, String, Number, Boolean, etc.
@@ -1113,21 +1102,13 @@ class JSDate : public TorqueGeneratedJSDate<JSDate, JSObject> {
// error messages are not directly accessible from JavaScript to
// prevent leaking information to user code called during error
// formatting.
-class JSMessageObject : public JSObject {
+class JSMessageObject
+ : public TorqueGeneratedJSMessageObject<JSMessageObject, JSObject> {
public:
// [type]: the type of error message.
inline MessageTemplate type() const;
inline void set_type(MessageTemplate value);
- // [arguments]: the arguments for formatting the error message.
- DECL_ACCESSORS(argument, Object)
-
- // [script]: the script from which the error message originated.
- DECL_ACCESSORS(script, Script)
-
- // [stack_frames]: an array of stack frames for this error object.
- DECL_ACCESSORS(stack_frames, Object)
-
// Initializes the source positions in the object if possible. Does nothing if
// called more than once. If called when stack space is exhausted, then the
  // source positions will not be set and calling it again when there is more
@@ -1159,14 +1140,9 @@ class JSMessageObject : public JSObject {
DECL_INT_ACCESSORS(error_level)
- DECL_CAST(JSMessageObject)
-
// Dispatched behavior.
DECL_PRINTER(JSMessageObject)
- DECL_VERIFIER(JSMessageObject)
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_MESSAGE_OBJECT_FIELDS)
// TODO(v8:8989): [torque] Support marker constants.
static const int kPointerFieldsEndOffset = kStartPositionOffset;
@@ -1195,7 +1171,10 @@ class JSMessageObject : public JSObject {
DECL_INT_ACCESSORS(raw_type)
- OBJECT_CONSTRUCTORS(JSMessageObject, JSObject);
+ // Hide generated accessors; custom accessors are named "raw_type".
+ DECL_INT_ACCESSORS(message_type)
+
+ TQ_OBJECT_CONSTRUCTORS(JSMessageObject)
};
// The [Async-from-Sync Iterator] object
diff --git a/deps/v8/src/objects/js-objects.tq b/deps/v8/src/objects/js-objects.tq
index fd48d43045..1ce7dbd9ea 100644
--- a/deps/v8/src/objects/js-objects.tq
+++ b/deps/v8/src/objects/js-objects.tq
@@ -5,7 +5,6 @@
// JSReceiver corresponds to objects in the JS sense.
@abstract
@highestInstanceTypeWithinParentClassRange
-@doNotGenerateCppClass
extern class JSReceiver extends HeapObject {
properties_or_hash: SwissNameDictionary|FixedArrayBase|PropertyArray|Smi;
}
@@ -97,20 +96,24 @@ extern class JSGlobalProxy extends JSSpecialObject {
native_context: Object;
}
-@doNotGenerateCppClass
extern class JSGlobalObject extends JSSpecialObject {
+ // [native context]: the natives corresponding to this global object.
native_context: NativeContext;
+
+ // [global proxy]: the global proxy object of the context
global_proxy: JSGlobalProxy;
}
extern class JSPrimitiveWrapper extends JSCustomElementsObject { value: JSAny; }
-@doNotGenerateCppClass
extern class JSMessageObject extends JSObject {
// Tagged fields.
message_type: Smi;
- arguments: Object;
+ // [argument]: the arguments for formatting the error message.
+ argument: Object;
+ // [script]: the script from which the error message originated.
script: Script;
+ // [stack_frames]: an array of stack frames for this error object.
stack_frames: Object;
shared_info: SharedFunctionInfo|Undefined;
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
index dda3afec99..5afb66a0b2 100644
--- a/deps/v8/src/objects/js-promise.h
+++ b/deps/v8/src/objects/js-promise.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_JS_PROMISE_H_
#define V8_OBJECTS_JS_PROMISE_H_
+#include "include/v8-promise.h"
#include "src/objects/js-objects.h"
#include "src/objects/promise.h"
#include "torque-generated/bit-fields.h"
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
index 575c942651..c865b1ffd5 100644
--- a/deps/v8/src/objects/js-proxy.h
+++ b/deps/v8/src/objects/js-proxy.h
@@ -124,12 +124,10 @@ class JSProxy : public TorqueGeneratedJSProxy<JSProxy, JSReceiver> {
// JSProxyRevocableResult is just a JSObject with a specific initial map.
// This initial map adds in-object properties for "proxy" and "revoke".
// See https://tc39.github.io/ecma262/#sec-proxy.revocable
-class JSProxyRevocableResult : public JSObject {
+class JSProxyRevocableResult
+ : public TorqueGeneratedJSProxyRevocableResult<JSProxyRevocableResult,
+ JSObject> {
public:
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSObject::kHeaderSize, TORQUE_GENERATED_JS_PROXY_REVOCABLE_RESULT_FIELDS)
-
// Indices of in-object properties.
static const int kProxyIndex = 0;
static const int kRevokeIndex = 1;
diff --git a/deps/v8/src/objects/js-proxy.tq b/deps/v8/src/objects/js-proxy.tq
index b91c0de5d0..5d0f51a94f 100644
--- a/deps/v8/src/objects/js-proxy.tq
+++ b/deps/v8/src/objects/js-proxy.tq
@@ -7,7 +7,6 @@ extern class JSProxy extends JSReceiver {
handler: JSReceiver|Null;
}
-@doNotGenerateCppClass
extern shape JSProxyRevocableResult extends JSObject {
proxy: JSAny;
revoke: JSAny;
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index 0f38daa5e7..2a69bea650 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -21,17 +21,9 @@ namespace internal {
#include "torque-generated/src/objects/js-regexp-tq-inl.inc"
TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExp)
-OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(JSRegExpResult, JSArray)
-OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(JSRegExpResultIndices, JSArray)
-
-inline JSRegExpResultWithIndices::JSRegExpResultWithIndices(Address ptr)
- : JSRegExpResult(ptr) {
- SLOW_DCHECK(IsJSArray());
-}
-
-CAST_ACCESSOR(JSRegExpResult)
-CAST_ACCESSOR(JSRegExpResultWithIndices)
-CAST_ACCESSOR(JSRegExpResultIndices)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpResult)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpResultIndices)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpResultWithIndices)
ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset)
@@ -59,7 +51,7 @@ int JSRegExp::MaxRegisterCount() const {
return Smi::ToInt(DataAt(kIrregexpMaxRegisterCountIndex));
}
-JSRegExp::Flags JSRegExp::GetFlags() {
+JSRegExp::Flags JSRegExp::GetFlags() const {
DCHECK(this->data().IsFixedArray());
Object data = this->data();
Smi smi = Smi::cast(FixedArray::cast(data).get(kFlagsIndex));
diff --git a/deps/v8/src/objects/js-regexp.cc b/deps/v8/src/objects/js-regexp.cc
index bfc16d1b85..e1e06cb12a 100644
--- a/deps/v8/src/objects/js-regexp.cc
+++ b/deps/v8/src/objects/js-regexp.cc
@@ -111,64 +111,38 @@ uint32_t JSRegExp::BacktrackLimit() const {
}
// static
-JSRegExp::Flags JSRegExp::FlagsFromString(Isolate* isolate,
- Handle<String> flags, bool* success) {
- int length = flags->length();
- if (length == 0) {
- *success = true;
- return JSRegExp::kNone;
- }
+base::Optional<JSRegExp::Flags> JSRegExp::FlagsFromString(
+ Isolate* isolate, Handle<String> flags) {
+ const int length = flags->length();
+
// A longer flags string cannot be valid.
- if (length > JSRegExp::kFlagCount) return JSRegExp::Flags(0);
- JSRegExp::Flags value(0);
- if (flags->IsSeqOneByteString()) {
- DisallowGarbageCollection no_gc;
- SeqOneByteString seq_flags = SeqOneByteString::cast(*flags);
- for (int i = 0; i < length; i++) {
- base::Optional<JSRegExp::Flag> maybe_flag =
- JSRegExp::FlagFromChar(seq_flags.Get(i));
- if (!maybe_flag.has_value()) return JSRegExp::Flags(0);
- JSRegExp::Flag flag = *maybe_flag;
- // Duplicate flag.
- if (value & flag) return JSRegExp::Flags(0);
- value |= flag;
- }
- } else {
- flags = String::Flatten(isolate, flags);
- DisallowGarbageCollection no_gc;
- String::FlatContent flags_content = flags->GetFlatContent(no_gc);
- for (int i = 0; i < length; i++) {
- base::Optional<JSRegExp::Flag> maybe_flag =
- JSRegExp::FlagFromChar(flags_content.Get(i));
- if (!maybe_flag.has_value()) return JSRegExp::Flags(0);
- JSRegExp::Flag flag = *maybe_flag;
- // Duplicate flag.
- if (value & flag) return JSRegExp::Flags(0);
- value |= flag;
- }
+ if (length > JSRegExp::kFlagCount) return {};
+
+ RegExpFlags value;
+ FlatStringReader reader(isolate, String::Flatten(isolate, flags));
+
+ for (int i = 0; i < length; i++) {
+ base::Optional<RegExpFlag> flag = JSRegExp::FlagFromChar(reader.Get(i));
+ if (!flag.has_value()) return {};
+ if (value & flag.value()) return {}; // Duplicate.
+ value |= flag.value();
}
- *success = true;
- return value;
+
+ return JSRegExp::AsJSRegExpFlags(value);
}
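
The rewritten parser reads the flattened string once and rejects unknown or duplicated flags by testing the accumulating bitmask before OR-ing the new bit in. A compact standalone sketch of that duplicate check (flag set reduced for illustration; the real code derives it from REGEXP_FLAG_LIST):

#include <cstdint>
#include <optional>
#include <string>

std::optional<uint32_t> FlagBitFromChar(char c) {
  switch (c) {
    case 'g': return 1u << 0;
    case 'i': return 1u << 1;
    case 'm': return 1u << 2;
    default:  return std::nullopt;
  }
}

// Returns nullopt on an unknown or duplicated flag, mirroring the
// base::Optional<Flags> contract of the new FlagsFromString.
std::optional<uint32_t> ParseFlags(const std::string& flags) {
  uint32_t value = 0;
  for (char c : flags) {
    std::optional<uint32_t> bit = FlagBitFromChar(c);
    if (!bit.has_value()) return std::nullopt;
    if (value & bit.value()) return std::nullopt;  // duplicate flag
    value |= bit.value();
  }
  return value;
}
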
// static
Handle<String> JSRegExp::StringFromFlags(Isolate* isolate,
JSRegExp::Flags flags) {
- // Ensure that this function is up-to-date with the supported flag options.
- constexpr size_t kFlagCount = JSRegExp::kFlagCount;
- STATIC_ASSERT(kFlagCount == 8);
-
- // Translate to the lexicographically smaller string.
+ static constexpr int kStringTerminator = 1;
int cursor = 0;
- char buffer[kFlagCount] = {'\0'};
- if (flags & JSRegExp::kHasIndices) buffer[cursor++] = 'd';
- if (flags & JSRegExp::kGlobal) buffer[cursor++] = 'g';
- if (flags & JSRegExp::kIgnoreCase) buffer[cursor++] = 'i';
- if (flags & JSRegExp::kLinear) buffer[cursor++] = 'l';
- if (flags & JSRegExp::kMultiline) buffer[cursor++] = 'm';
- if (flags & JSRegExp::kDotAll) buffer[cursor++] = 's';
- if (flags & JSRegExp::kUnicode) buffer[cursor++] = 'u';
- if (flags & JSRegExp::kSticky) buffer[cursor++] = 'y';
+ char buffer[kFlagCount + kStringTerminator];
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ if (flags & JSRegExp::k##Camel) buffer[cursor++] = Char;
+ REGEXP_FLAG_LIST(V)
+#undef V
+ buffer[cursor++] = '\0';
+ DCHECK_LE(cursor, kFlagCount + kStringTerminator);
return isolate->factory()->NewStringFromAsciiChecked(buffer);
}
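
StringFromFlags now expands REGEXP_FLAG_LIST instead of hand-listing each flag, so adding a flag in one place updates the string conversion automatically. A self-contained sketch of the X-macro pattern it relies on (the list and its entries are hypothetical, not the real REGEXP_FLAG_LIST):

#include <string>

// Hypothetical flag list in the (Lower, Camel, LowerCamel, Char, Bit)
// shape the V(...) expansion above expects.
#define DEMO_FLAG_LIST(V)           \
  V(global, Global, global, 'g', 0) \
  V(sticky, Sticky, sticky, 'y', 1)

enum Flag {
#define V(Lower, Camel, LowerCamel, Char, Bit) k##Camel = 1 << Bit,
  DEMO_FLAG_LIST(V)
#undef V
};

std::string StringFromFlags(int flags) {
  std::string result;
#define V(Lower, Camel, LowerCamel, Char, Bit) \
  if (flags & k##Camel) result += Char;
  DEMO_FLAG_LIST(V)
#undef V
  return result;
}
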
@@ -247,15 +221,15 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
Handle<String> source,
Handle<String> flags_string) {
Isolate* isolate = regexp->GetIsolate();
- bool success = false;
- Flags flags = JSRegExp::FlagsFromString(isolate, flags_string, &success);
- if (!success) {
+ base::Optional<Flags> flags =
+ JSRegExp::FlagsFromString(isolate, flags_string);
+ if (!flags.has_value()) {
THROW_NEW_ERROR(
isolate,
NewSyntaxError(MessageTemplate::kInvalidRegExpFlags, flags_string),
JSRegExp);
}
- return Initialize(regexp, source, flags);
+ return Initialize(regexp, source, flags.value());
}
namespace {
@@ -417,7 +391,9 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
source = String::Flatten(isolate, source);
RETURN_ON_EXCEPTION(
- isolate, RegExp::Compile(isolate, regexp, source, flags, backtrack_limit),
+ isolate,
+ RegExp::Compile(isolate, regexp, source, JSRegExp::AsRegExpFlags(flags),
+ backtrack_limit),
JSRegExp);
Handle<String> escaped_source;
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index 029964faa2..4671f6607b 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -5,8 +5,10 @@
#ifndef V8_OBJECTS_JS_REGEXP_H_
#define V8_OBJECTS_JS_REGEXP_H_
+#include "include/v8-regexp.h"
#include "src/objects/contexts.h"
#include "src/objects/js-array.h"
+#include "src/regexp/regexp-flags.h"
#include "torque-generated/bit-fields.h"
// Has to be the last include (doesn't have include guards):
@@ -43,32 +45,35 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
enum Type { NOT_COMPILED, ATOM, IRREGEXP, EXPERIMENTAL };
DEFINE_TORQUE_GENERATED_JS_REG_EXP_FLAGS()
- static base::Optional<Flag> FlagFromChar(char c) {
- STATIC_ASSERT(kFlagCount == 8);
- // clang-format off
- return c == 'g' ? base::Optional<Flag>(kGlobal)
- : c == 'i' ? base::Optional<Flag>(kIgnoreCase)
- : c == 'm' ? base::Optional<Flag>(kMultiline)
- : c == 'y' ? base::Optional<Flag>(kSticky)
- : c == 'u' ? base::Optional<Flag>(kUnicode)
- : c == 's' ? base::Optional<Flag>(kDotAll)
- : c == 'd' ? base::Optional<Flag>(kHasIndices)
- : (FLAG_enable_experimental_regexp_engine && c == 'l')
- ? base::Optional<Flag>(kLinear)
- : base::Optional<Flag>();
- // clang-format on
+ static constexpr Flag AsJSRegExpFlag(RegExpFlag f) {
+ return static_cast<Flag>(f);
+ }
+ static constexpr Flags AsJSRegExpFlags(RegExpFlags f) {
+ return Flags{static_cast<int>(f)};
+ }
+ static constexpr RegExpFlags AsRegExpFlags(Flags f) {
+ return RegExpFlags{static_cast<int>(f)};
+ }
+
+ static base::Optional<RegExpFlag> FlagFromChar(char c) {
+ base::Optional<RegExpFlag> f = TryRegExpFlagFromChar(c);
+ if (!f.has_value()) return f;
+ if (f.value() == RegExpFlag::kLinear &&
+ !FLAG_enable_experimental_regexp_engine) {
+ return {};
+ }
+ return f;
}
STATIC_ASSERT(static_cast<int>(kNone) == v8::RegExp::kNone);
- STATIC_ASSERT(static_cast<int>(kGlobal) == v8::RegExp::kGlobal);
- STATIC_ASSERT(static_cast<int>(kIgnoreCase) == v8::RegExp::kIgnoreCase);
- STATIC_ASSERT(static_cast<int>(kMultiline) == v8::RegExp::kMultiline);
- STATIC_ASSERT(static_cast<int>(kSticky) == v8::RegExp::kSticky);
- STATIC_ASSERT(static_cast<int>(kUnicode) == v8::RegExp::kUnicode);
- STATIC_ASSERT(static_cast<int>(kDotAll) == v8::RegExp::kDotAll);
- STATIC_ASSERT(static_cast<int>(kLinear) == v8::RegExp::kLinear);
- STATIC_ASSERT(static_cast<int>(kHasIndices) == v8::RegExp::kHasIndices);
+#define V(_, Camel, ...) \
+ STATIC_ASSERT(static_cast<int>(k##Camel) == v8::RegExp::k##Camel); \
+ STATIC_ASSERT(static_cast<int>(k##Camel) == \
+ static_cast<int>(RegExpFlag::k##Camel));
+ REGEXP_FLAG_LIST(V)
+#undef V
STATIC_ASSERT(kFlagCount == v8::RegExp::kFlagCount);
+ STATIC_ASSERT(kFlagCount == kRegExpFlagCount);
DECL_ACCESSORS(last_index, Object)
@@ -86,8 +91,8 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
Handle<String> source,
Handle<String> flags_string);
- static Flags FlagsFromString(Isolate* isolate, Handle<String> flags,
- bool* success);
+ static base::Optional<Flags> FlagsFromString(Isolate* isolate,
+ Handle<String> flags);
V8_EXPORT_PRIVATE static Handle<String> StringFromFlags(Isolate* isolate,
Flags flags);
@@ -112,7 +117,7 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
static int RegistersForCaptureCount(int count) { return (count + 1) * 2; }
inline int MaxRegisterCount() const;
- inline Flags GetFlags();
+ inline Flags GetFlags() const;
inline String Pattern();
inline String EscapedPattern();
inline Object CaptureNameMap();
@@ -249,18 +254,13 @@ DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
// faster creation of RegExp exec results.
// This class just holds constants used when creating the result.
// After creation the result must be treated as a JSArray in all regards.
-class JSRegExpResult : public JSArray {
+class JSRegExpResult
+ : public TorqueGeneratedJSRegExpResult<JSRegExpResult, JSArray> {
public:
- DECL_CAST(JSRegExpResult)
-
// TODO(joshualitt): We would like to add printers and verifiers to
// JSRegExpResult, and maybe JSRegExpResultIndices, but both have the same
// instance type as JSArray.
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSArray::kHeaderSize,
- TORQUE_GENERATED_JS_REG_EXP_RESULT_FIELDS)
-
// Indices of in-object properties.
static const int kIndexIndex = 0;
static const int kInputIndex = 1;
@@ -274,25 +274,20 @@ class JSRegExpResult : public JSArray {
static const int kMapIndexInContext = Context::REGEXP_RESULT_MAP_INDEX;
- OBJECT_CONSTRUCTORS(JSRegExpResult, JSArray);
+ TQ_OBJECT_CONSTRUCTORS(JSRegExpResult)
};
-class JSRegExpResultWithIndices : public JSRegExpResult {
+class JSRegExpResultWithIndices
+ : public TorqueGeneratedJSRegExpResultWithIndices<JSRegExpResultWithIndices,
+ JSRegExpResult> {
public:
- DECL_CAST(JSRegExpResultWithIndices)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSRegExpResult::kSize,
- TORQUE_GENERATED_JS_REG_EXP_RESULT_WITH_INDICES_FIELDS)
-
static_assert(
JSRegExpResult::kInObjectPropertyCount == 6,
"JSRegExpResultWithIndices must be a subclass of JSRegExpResult");
static const int kIndicesIndex = 6;
static const int kInObjectPropertyCount = 7;
- OBJECT_CONSTRUCTORS(JSRegExpResultWithIndices, JSRegExpResult);
+ TQ_OBJECT_CONSTRUCTORS(JSRegExpResultWithIndices)
};
// JSRegExpResultIndices is just a JSArray with a specific initial map.
@@ -301,14 +296,10 @@ class JSRegExpResultWithIndices : public JSRegExpResult {
// faster creation of RegExp exec results.
// This class just holds constants used when creating the result.
// After creation the result must be treated as a JSArray in all regards.
-class JSRegExpResultIndices : public JSArray {
+class JSRegExpResultIndices
+ : public TorqueGeneratedJSRegExpResultIndices<JSRegExpResultIndices,
+ JSArray> {
public:
- DECL_CAST(JSRegExpResultIndices)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSArray::kHeaderSize, TORQUE_GENERATED_JS_REG_EXP_RESULT_INDICES_FIELDS)
-
static Handle<JSRegExpResultIndices> BuildIndices(
Isolate* isolate, Handle<RegExpMatchInfo> match_info,
Handle<Object> maybe_names);
@@ -320,7 +311,7 @@ class JSRegExpResultIndices : public JSArray {
// Descriptor index of groups.
static const int kGroupsDescriptorIndex = 1;
- OBJECT_CONSTRUCTORS(JSRegExpResultIndices, JSArray);
+ TQ_OBJECT_CONSTRUCTORS(JSRegExpResultIndices)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-regexp.tq b/deps/v8/src/objects/js-regexp.tq
index 328dd94efb..7c60df214a 100644
--- a/deps/v8/src/objects/js-regexp.tq
+++ b/deps/v8/src/objects/js-regexp.tq
@@ -38,7 +38,6 @@ RegExpBuiltinsAssembler::FastStoreLastIndex(FastJSRegExp, Smi): void;
extern class JSRegExpConstructor extends JSFunction
generates 'TNode<JSFunction>';
-@doNotGenerateCppClass
extern shape JSRegExpResult extends JSArray {
// In-object properties:
// The below fields are externally exposed.
@@ -52,12 +51,10 @@ extern shape JSRegExpResult extends JSArray {
regexp_last_index: Smi;
}
-@doNotGenerateCppClass
extern shape JSRegExpResultWithIndices extends JSRegExpResult {
indices: JSAny;
}
-@doNotGenerateCppClass
extern shape JSRegExpResultIndices extends JSArray {
// In-object properties:
// The groups field is externally exposed.
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index caa4ce562d..34db9ad1bf 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -342,9 +342,9 @@ template <typename T>
MaybeHandle<T> FormatCommon(
Isolate* isolate, Handle<JSRelativeTimeFormat> format,
Handle<Object> value_obj, Handle<Object> unit_obj, const char* func_name,
- MaybeHandle<T> (*formatToResult)(Isolate*,
- const icu::FormattedRelativeDateTime&,
- Handle<Object>, Handle<String>)) {
+ const std::function<
+ MaybeHandle<T>(Isolate*, const icu::FormattedRelativeDateTime&,
+ Handle<Object>, Handle<String>)>& formatToResult) {
// 3. Let value be ? ToNumber(value).
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h
index 13ac175cf6..acce7b72b9 100644
--- a/deps/v8/src/objects/js-weak-refs-inl.h
+++ b/deps/v8/src/objects/js-weak-refs-inl.h
@@ -21,18 +21,7 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(WeakCell)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakRef)
-OBJECT_CONSTRUCTORS_IMPL(JSFinalizationRegistry, JSObject)
-
-ACCESSORS(JSFinalizationRegistry, native_context, NativeContext,
- kNativeContextOffset)
-ACCESSORS(JSFinalizationRegistry, cleanup, Object, kCleanupOffset)
-ACCESSORS(JSFinalizationRegistry, active_cells, HeapObject, kActiveCellsOffset)
-ACCESSORS(JSFinalizationRegistry, cleared_cells, HeapObject,
- kClearedCellsOffset)
-ACCESSORS(JSFinalizationRegistry, key_map, Object, kKeyMapOffset)
-SMI_ACCESSORS(JSFinalizationRegistry, flags, kFlagsOffset)
-ACCESSORS(JSFinalizationRegistry, next_dirty, Object, kNextDirtyOffset)
-CAST_ACCESSOR(JSFinalizationRegistry)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSFinalizationRegistry)
BIT_FIELD_ACCESSORS(JSFinalizationRegistry, flags, scheduled_for_cleanup,
JSFinalizationRegistry::ScheduledForCleanupBit)
diff --git a/deps/v8/src/objects/js-weak-refs.h b/deps/v8/src/objects/js-weak-refs.h
index 250186e7be..b2dc41b570 100644
--- a/deps/v8/src/objects/js-weak-refs.h
+++ b/deps/v8/src/objects/js-weak-refs.h
@@ -21,22 +21,12 @@ class WeakCell;
// FinalizationRegistry object from the JS Weak Refs spec proposal:
// https://github.com/tc39/proposal-weakrefs
-class JSFinalizationRegistry : public JSObject {
+class JSFinalizationRegistry
+ : public TorqueGeneratedJSFinalizationRegistry<JSFinalizationRegistry,
+ JSObject> {
public:
DECL_PRINTER(JSFinalizationRegistry)
EXPORT_DECL_VERIFIER(JSFinalizationRegistry)
- DECL_CAST(JSFinalizationRegistry)
-
- DECL_ACCESSORS(native_context, NativeContext)
- DECL_ACCESSORS(cleanup, Object)
-
- DECL_ACCESSORS(active_cells, HeapObject)
- DECL_ACCESSORS(cleared_cells, HeapObject)
- DECL_ACCESSORS(key_map, Object)
-
- DECL_ACCESSORS(next_dirty, Object)
-
- DECL_INT_ACCESSORS(flags)
DECL_BOOLEAN_ACCESSORS(scheduled_for_cleanup)
@@ -72,14 +62,10 @@ class JSFinalizationRegistry : public JSObject {
Isolate* isolate, Address raw_finalization_registry,
Address raw_weak_cell);
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSObject::kHeaderSize, TORQUE_GENERATED_JS_FINALIZATION_REGISTRY_FIELDS)
-
// Bitfields in flags.
DEFINE_TORQUE_GENERATED_FINALIZATION_REGISTRY_FLAGS()
- OBJECT_CONSTRUCTORS(JSFinalizationRegistry, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSFinalizationRegistry)
};
// Internal object for storing weak references in JSFinalizationRegistry.
diff --git a/deps/v8/src/objects/js-weak-refs.tq b/deps/v8/src/objects/js-weak-refs.tq
index 36f3817ac7..c687ab5001 100644
--- a/deps/v8/src/objects/js-weak-refs.tq
+++ b/deps/v8/src/objects/js-weak-refs.tq
@@ -6,7 +6,6 @@ bitfield struct FinalizationRegistryFlags extends uint31 {
scheduled_for_cleanup: bool: 1 bit;
}
-@doNotGenerateCppClass
extern class JSFinalizationRegistry extends JSObject {
native_context: NativeContext;
cleanup: Callable;
diff --git a/deps/v8/src/objects/keys.h b/deps/v8/src/objects/keys.h
index 4abe2a5ad3..b1f539e233 100644
--- a/deps/v8/src/objects/keys.h
+++ b/deps/v8/src/objects/keys.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_KEYS_H_
#define V8_OBJECTS_KEYS_H_
+#include "include/v8-object.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-objects.h"
#include "src/objects/objects.h"
@@ -17,6 +18,18 @@ class FastKeyAccumulator;
enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX };
+enum class GetKeysConversion {
+ kKeepNumbers = static_cast<int>(v8::KeyConversionMode::kKeepNumbers),
+ kConvertToString = static_cast<int>(v8::KeyConversionMode::kConvertToString),
+ kNoNumbers = static_cast<int>(v8::KeyConversionMode::kNoNumbers)
+};
+
+enum class KeyCollectionMode {
+ kOwnOnly = static_cast<int>(v8::KeyCollectionMode::kOwnOnly),
+ kIncludePrototypes =
+ static_cast<int>(v8::KeyCollectionMode::kIncludePrototypes)
+};
+
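
These enums, moved here from objects.h, define each internal enumerator by static_cast from the public v8:: API enum so the two stay numerically identical. A sketch of pinning such a mirror with a static_assert (both enums here are hypothetical stand-ins):

// Public API enum (stand-in for v8::KeyCollectionMode).
namespace api {
enum class KeyCollectionMode { kOwnOnly = 0, kIncludePrototypes = 1 };
}

// Internal mirror defined via static_cast, as in the declarations above.
enum class KeyCollectionMode {
  kOwnOnly = static_cast<int>(api::KeyCollectionMode::kOwnOnly),
  kIncludePrototypes =
      static_cast<int>(api::KeyCollectionMode::kIncludePrototypes)
};

// Optional extra guard: fails to compile if the two enums ever drift apart.
static_assert(static_cast<int>(KeyCollectionMode::kIncludePrototypes) ==
                  static_cast<int>(api::KeyCollectionMode::kIncludePrototypes),
              "internal enum must mirror the public API enum");
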
// This is a helper class for JSReceiver::GetKeys which collects and sorts keys.
// GetKeys needs to sort keys per prototype level, first showing the integer
// indices from elements then the strings from the properties. However, this
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 572b3f9299..dc37a119fa 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -466,6 +466,28 @@ void Map::AccountAddedOutOfObjectPropertyField(int unused_in_property_array) {
DCHECK_EQ(unused_in_property_array, UnusedPropertyFields());
}
+#if V8_ENABLE_WEBASSEMBLY
+uint8_t Map::WasmByte1() const {
+ DCHECK(IsWasmObjectMap());
+ return inobject_properties_start_or_constructor_function_index();
+}
+
+uint8_t Map::WasmByte2() const {
+ DCHECK(IsWasmObjectMap());
+ return used_or_unused_instance_size_in_words();
+}
+
+void Map::SetWasmByte1(uint8_t value) {
+ CHECK(IsWasmObjectMap());
+ set_inobject_properties_start_or_constructor_function_index(value);
+}
+
+void Map::SetWasmByte2(uint8_t value) {
+ CHECK(IsWasmObjectMap());
+ set_used_or_unused_instance_size_in_words(value);
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
byte Map::bit_field() const {
// TODO(solanes, v8:7790, v8:11353): Make this non-atomic when TSAN sees the
// map's store synchronization.
@@ -726,7 +748,7 @@ bool Map::ConcurrentIsMap(PtrComprCageBase cage_base,
}
DEF_GETTER(Map, GetBackPointer, HeapObject) {
- Object object = constructor_or_back_pointer(cage_base);
+ Object object = constructor_or_back_pointer(cage_base, kRelaxedLoad);
if (ConcurrentIsMap(cage_base, object)) {
return Map::cast(object);
}
@@ -754,6 +776,9 @@ ACCESSORS(Map, prototype_validity_cell, Object, kPrototypeValidityCellOffset)
ACCESSORS_CHECKED2(Map, constructor_or_back_pointer, Object,
kConstructorOrBackPointerOrNativeContextOffset,
!IsContextMap(), value.IsNull() || !IsContextMap())
+RELAXED_ACCESSORS_CHECKED2(Map, constructor_or_back_pointer, Object,
+ kConstructorOrBackPointerOrNativeContextOffset,
+ !IsContextMap(), value.IsNull() || !IsContextMap())
ACCESSORS_CHECKED(Map, native_context, NativeContext,
kConstructorOrBackPointerOrNativeContextOffset,
IsContextMap())
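
GetBackPointer now reads constructor_or_back_pointer with kRelaxedLoad, backed by the new RELAXED_ACCESSORS_CHECKED2 definition, because concurrent marker threads may read the field while the main thread writes it. The memory-ordering idea in plain std::atomic terms (a general illustration, not V8's tagged-field machinery):

#include <atomic>
#include <cstdint>

struct MapLike {
  // Stand-in for the tagged constructor_or_back_pointer slot.
  std::atomic<uintptr_t> constructor_or_back_pointer{0};

  // Relaxed load: no ordering guarantees, but the read itself is atomic,
  // so a concurrent reader never observes a torn value. This is what a
  // kRelaxedLoad accessor expresses.
  uintptr_t GetBackPointerRelaxed() const {
    return constructor_or_back_pointer.load(std::memory_order_relaxed);
  }

  void SetBackPointerRelaxed(uintptr_t value) {
    constructor_or_back_pointer.store(value, std::memory_order_relaxed);
  }
};
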
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 74d2a859e8..e649405091 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -565,6 +565,7 @@ class Map : public TorqueGeneratedMap<Map, HeapObject> {
// The field also overlaps with the native context pointer for context maps,
// and with the Wasm type info for WebAssembly object maps.
DECL_ACCESSORS(constructor_or_back_pointer, Object)
+ DECL_RELAXED_ACCESSORS(constructor_or_back_pointer, Object)
DECL_ACCESSORS(native_context, NativeContext)
DECL_ACCESSORS(native_context_or_null, Object)
DECL_ACCESSORS(wasm_type_info, WasmTypeInfo)
@@ -850,6 +851,12 @@ class Map : public TorqueGeneratedMap<Map, HeapObject> {
InstanceType instance_type);
inline bool CanHaveFastTransitionableElementsKind() const;
+ // Maps for Wasm objects can use certain fields for other purposes.
+ inline uint8_t WasmByte1() const;
+ inline uint8_t WasmByte2() const;
+ inline void SetWasmByte1(uint8_t value);
+ inline void SetWasmByte2(uint8_t value);
+
private:
// This byte encodes either the instance size without the in-object slack or
// the slack size in properties backing store.
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index 05ea04ccd9..5cb7e4bb7f 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_MODULE_H_
#define V8_OBJECTS_MODULE_H_
+#include "include/v8-script.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
#include "src/objects/objects.h"
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index 1aa9dc10b4..f531ab0aa5 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -8,7 +8,6 @@
#undef OBJECT_CONSTRUCTORS
#undef OBJECT_CONSTRUCTORS_IMPL
-#undef OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER
#undef NEVER_READ_ONLY_SPACE
#undef NEVER_READ_ONLY_SPACE_IMPL
#undef DECL_PRIMITIVE_GETTER
@@ -40,7 +39,6 @@
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
#undef INT32_ACCESSORS
-#undef IMPLICIT_TAG_RELAXED_INT32_ACCESSORS
#undef RELAXED_INT32_ACCESSORS
#undef UINT16_ACCESSORS
#undef UINT8_ACCESSORS
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 561b1de30b..79cc79033e 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -30,11 +30,6 @@
#define OBJECT_CONSTRUCTORS_IMPL(Type, Super) \
inline Type::Type(Address ptr) : Super(ptr) { SLOW_DCHECK(Is##Type()); }
-// In these cases, we don't have our own instance type to check, so check the
-// supertype instead. This happens for types denoting a NativeContext-dependent
-// set of maps.
-#define OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(Type, Super) \
- inline Type::Type(Address ptr) : Super(ptr) { SLOW_DCHECK(Is##Super()); }
#define NEVER_READ_ONLY_SPACE \
inline Heap* GetHeap() const; \
@@ -163,15 +158,6 @@
int32_t holder::name() const { return ReadField<int32_t>(offset); } \
void holder::set_##name(int32_t value) { WriteField<int32_t>(offset, value); }
-// TODO(solanes): Use the non-implicit one, and change the uses to use the tag.
-#define IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(holder, name, offset) \
- int32_t holder::name() const { \
- return RELAXED_READ_INT32_FIELD(*this, offset); \
- } \
- void holder::set_##name(int32_t value) { \
- RELAXED_WRITE_INT32_FIELD(*this, offset, value); \
- }
-
#define RELAXED_INT32_ACCESSORS(holder, name, offset) \
int32_t holder::name(RelaxedLoadTag) const { \
return RELAXED_READ_INT32_FIELD(*this, offset); \
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 7750b26575..838b0536e2 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -727,7 +727,7 @@ class WasmArray::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return WasmArray::GcSafeSizeFor(map, WasmArray::cast(object).length());
+ return WasmArray::SizeFor(map, WasmArray::cast(object).length());
}
};
@@ -800,8 +800,8 @@ class CoverageInfo::BodyDescriptor final : public BodyDescriptorBase {
class Code::BodyDescriptor final : public BodyDescriptorBase {
public:
STATIC_ASSERT(kRelocationInfoOffset + kTaggedSize ==
- kDeoptimizationDataOffset);
- STATIC_ASSERT(kDeoptimizationDataOffset + kTaggedSize ==
+ kDeoptimizationDataOrInterpreterDataOffset);
+ STATIC_ASSERT(kDeoptimizationDataOrInterpreterDataOffset + kTaggedSize ==
kPositionTableOffset);
STATIC_ASSERT(kPositionTableOffset + kTaggedSize == kCodeDataContainerOffset);
STATIC_ASSERT(kCodeDataContainerOffset + kTaggedSize == kDataStart);
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index 20ce96aae5..f70a469364 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -124,7 +124,6 @@ namespace internal {
IF_WASM(V, _, ASM_WASM_DATA_TYPE, AsmWasmData, asm_wasm_data) \
V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
async_generator_request) \
- V(_, BASELINE_DATA_TYPE, BaselineData, baseline_data) \
V(_, BREAK_POINT_TYPE, BreakPoint, break_point) \
V(_, BREAK_POINT_INFO_TYPE, BreakPointInfo, break_point_info) \
V(_, CACHED_TEMPLATE_OBJECT_TYPE, CachedTemplateObject, \
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index 2f16615536..68482fe68f 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -197,6 +197,8 @@ std::ostream& operator<<(std::ostream& os, PropertyCellType type) {
return os << "ConstantType";
case PropertyCellType::kMutable:
return os << "Mutable";
+ case PropertyCellType::kInTransition:
+ return os << "InTransition";
}
UNREACHABLE();
}
@@ -2291,7 +2293,7 @@ int HeapObject::SizeFromMap(Map map) const {
return WasmStruct::GcSafeSize(map);
}
if (instance_type == WASM_ARRAY_TYPE) {
- return WasmArray::GcSafeSizeFor(map, WasmArray::cast(*this).length());
+ return WasmArray::SizeFor(map, WasmArray::cast(*this).length());
}
#endif // V8_ENABLE_WEBASSEMBLY
DCHECK_EQ(instance_type, EMBEDDER_DATA_ARRAY_TYPE);
@@ -6532,6 +6534,8 @@ PropertyCellType PropertyCell::UpdatedType(Isolate* isolate,
V8_FALLTHROUGH;
case PropertyCellType::kMutable:
return PropertyCellType::kMutable;
+ case PropertyCellType::kInTransition:
+ UNREACHABLE();
}
}
@@ -6587,6 +6591,7 @@ bool PropertyCell::CheckDataIsCompatible(PropertyDetails details,
Object value) {
DisallowGarbageCollection no_gc;
PropertyCellType cell_type = details.cell_type();
+ CHECK_NE(cell_type, PropertyCellType::kInTransition);
if (value.IsTheHole()) {
CHECK_EQ(cell_type, PropertyCellType::kConstant);
} else {
@@ -6620,8 +6625,9 @@ bool PropertyCell::CanTransitionTo(PropertyDetails new_details,
return new_details.cell_type() == PropertyCellType::kMutable ||
(new_details.cell_type() == PropertyCellType::kConstant &&
new_value.IsTheHole());
+ case PropertyCellType::kInTransition:
+ UNREACHABLE();
}
- UNREACHABLE();
}
#endif // DEBUG
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index eb31ec957d..61bcf79800 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -9,7 +9,6 @@
#include <memory>
#include "include/v8-internal.h"
-#include "include/v8.h"
#include "include/v8config.h"
#include "src/base/bits.h"
#include "src/base/build_config.h"
@@ -840,18 +839,6 @@ enum EnsureElementsMode {
// Indicator for one component of an AccessorPair.
enum AccessorComponent { ACCESSOR_GETTER, ACCESSOR_SETTER };
-enum class GetKeysConversion {
- kKeepNumbers = static_cast<int>(v8::KeyConversionMode::kKeepNumbers),
- kConvertToString = static_cast<int>(v8::KeyConversionMode::kConvertToString),
- kNoNumbers = static_cast<int>(v8::KeyConversionMode::kNoNumbers)
-};
-
-enum class KeyCollectionMode {
- kOwnOnly = static_cast<int>(v8::KeyCollectionMode::kOwnOnly),
- kIncludePrototypes =
- static_cast<int>(v8::KeyCollectionMode::kIncludePrototypes)
-};
-
// Utility superclass for stack-allocated objects that must be updated
// on gc. It provides two ways for the gc to update instances, either
// iterating or updating after gc.
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 1110352e46..45682e45e9 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -10,6 +10,7 @@
#include "src/objects/fixed-array.h"
#include "src/objects/internal-index.h"
#include "src/objects/js-objects.h"
+#include "src/objects/keys.h"
#include "src/objects/smi.h"
#include "src/roots/roots.h"
diff --git a/deps/v8/src/objects/property-cell-inl.h b/deps/v8/src/objects/property-cell-inl.h
index dfaaf1c80a..ef4fa75463 100644
--- a/deps/v8/src/objects/property-cell-inl.h
+++ b/deps/v8/src/objects/property-cell-inl.h
@@ -57,6 +57,9 @@ void PropertyCell::Transition(PropertyDetails new_details,
DCHECK(CanTransitionTo(new_details, *new_value));
// This code must be in sync with its counterpart in
// PropertyCellData::Serialize.
+ PropertyDetails transition_marker = new_details;
+ transition_marker.set_cell_type(PropertyCellType::kInTransition);
+ set_property_details_raw(transition_marker.AsSmi(), kReleaseStore);
set_value(*new_value, kReleaseStore);
set_property_details_raw(new_details.AsSmi(), kReleaseStore);
}
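
The hunk above publishes a kInTransition marker into the details word before the value is swapped, and only then stores the final details, all with release semantics, so a background thread can tell when details and value are momentarily out of sync. Below is a standalone sketch of that protocol, using std::atomic stand-ins instead of the real PropertyCell/PropertyDetails types; the reader-side retry loop is an assumed usage pattern, not code from this patch.

#include <atomic>

enum class CellType { kMutable, kConstant, kInTransition };

struct Cell {
  std::atomic<CellType> details{CellType::kConstant};
  std::atomic<int> value{0};
};

// Writer, mirroring PropertyCell::Transition: marker first, then the value,
// then the final details, each with a release store.
void Transition(Cell& cell, CellType new_details, int new_value) {
  cell.details.store(CellType::kInTransition, std::memory_order_release);
  cell.value.store(new_value, std::memory_order_release);
  cell.details.store(new_details, std::memory_order_release);
}

// Hypothetical background reader: bail out (caller retries) whenever the
// marker is visible or details changed underneath the value load, so a value
// is never paired with details from a different state.
bool TryRead(const Cell& cell, CellType* details_out, int* value_out) {
  CellType before = cell.details.load(std::memory_order_acquire);
  if (before == CellType::kInTransition) return false;
  int value = cell.value.load(std::memory_order_acquire);
  CellType after = cell.details.load(std::memory_order_acquire);
  if (after == CellType::kInTransition || after != before) return false;
  *details_out = before;
  *value_out = value;
  return true;
}

int main() {
  Cell cell;
  Transition(cell, CellType::kMutable, 42);
  CellType details;
  int value;
  while (!TryRead(cell, &details, &value)) {
  }
  return value == 42 ? 0 : 1;
}
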
diff --git a/deps/v8/src/objects/property-details.h b/deps/v8/src/objects/property-details.h
index 58cc2359cb..f32d6ceb89 100644
--- a/deps/v8/src/objects/property-details.h
+++ b/deps/v8/src/objects/property-details.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_PROPERTY_DETAILS_H_
#define V8_OBJECTS_PROPERTY_DETAILS_H_
-#include "include/v8.h"
+#include "include/v8-object.h"
#include "src/base/bit-field.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
@@ -242,6 +242,9 @@ enum class PropertyCellType {
kUndefined, // The PREMONOMORPHIC of property cells.
kConstant, // Cell has been assigned only once.
kConstantType, // Cell has been assigned only one type.
+ // Temporary value indicating an ongoing property cell state transition. Only
+ // observable by a background thread.
+ kInTransition,
// Value for dictionaries not holding cells, must be 0:
kNoCell = kMutable,
};
@@ -381,8 +384,7 @@ class PropertyDetails {
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
using KindField = base::BitField<PropertyKind, 0, 1>;
- using LocationField = KindField::Next<PropertyLocation, 1>;
- using ConstnessField = LocationField::Next<PropertyConstness, 1>;
+ using ConstnessField = KindField::Next<PropertyConstness, 1>;
using AttributesField = ConstnessField::Next<PropertyAttributes, 3>;
static const int kAttributesReadOnlyMask =
(READ_ONLY << AttributesField::kShift);
@@ -392,11 +394,12 @@ class PropertyDetails {
(DONT_ENUM << AttributesField::kShift);
// Bit fields for normalized/dictionary mode objects.
- using PropertyCellTypeField = AttributesField::Next<PropertyCellType, 2>;
+ using PropertyCellTypeField = AttributesField::Next<PropertyCellType, 3>;
using DictionaryStorageField = PropertyCellTypeField::Next<uint32_t, 23>;
// Bit fields for fast objects.
- using RepresentationField = AttributesField::Next<uint32_t, 3>;
+ using LocationField = AttributesField::Next<PropertyLocation, 1>;
+ using RepresentationField = LocationField::Next<uint32_t, 3>;
using DescriptorPointer =
RepresentationField::Next<uint32_t, kDescriptorIndexBitCount>;
using FieldIndexField =
@@ -415,7 +418,6 @@ class PropertyDetails {
STATIC_ASSERT(KindField::kLastUsedBit < 8);
STATIC_ASSERT(ConstnessField::kLastUsedBit < 8);
STATIC_ASSERT(AttributesField::kLastUsedBit < 8);
- STATIC_ASSERT(LocationField::kLastUsedBit < 8);
static const int kInitialIndex = 1;
@@ -445,12 +447,12 @@ class PropertyDetails {
// with an enumeration index of 0 as a single byte.
uint8_t ToByte() {
// We only care about the value of KindField, ConstnessField, and
- // AttributesField. LocationField is also stored, but it will always be
- // kField. We've statically asserted earlier that all those fields fit into
- // a byte together.
+ // AttributesField. We've statically asserted earlier that these fields fit
+ // into a byte together.
+
+ DCHECK_EQ(PropertyLocation::kField, location());
+ STATIC_ASSERT(static_cast<int>(PropertyLocation::kField) == 0);
- // PropertyCellTypeField comes next, its value must be kNoCell == 0 for
- // dictionary mode PropertyDetails anyway.
DCHECK_EQ(PropertyCellType::kNoCell, cell_type());
STATIC_ASSERT(static_cast<int>(PropertyCellType::kNoCell) == 0);
@@ -464,16 +466,13 @@ class PropertyDetails {
// Only to be used for bytes obtained by ToByte. In particular, only used for
// non-global dictionary properties.
static PropertyDetails FromByte(uint8_t encoded_details) {
- // The 0-extension to 32bit sets PropertyCellType to kNoCell and
- // enumeration index to 0, as intended. Everything else is obtained from
- // |encoded_details|.
-
+ // The 0-extension to 32bit sets PropertyLocation to kField,
+ // PropertyCellType to kNoCell, and enumeration index to 0, as intended.
+ // Everything else is obtained from |encoded_details|.
PropertyDetails details(encoded_details);
-
- DCHECK_EQ(0, details.dictionary_index());
DCHECK_EQ(PropertyLocation::kField, details.location());
DCHECK_EQ(PropertyCellType::kNoCell, details.cell_type());
-
+ DCHECK_EQ(0, details.dictionary_index());
return details;
}
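
The reworked layout above keeps Kind, Constness and Attributes in the low byte, moves LocationField into the fast-object half, and widens PropertyCellTypeField to three bits so it can hold the new kInTransition value; ToByte/FromByte stay lossless because PropertyLocation::kField and PropertyCellType::kNoCell both encode as zero. The following is a minimal standalone model of that round trip, with hand-rolled shifts standing in for base::BitField (field widths follow the diff, everything else is illustrative).

#include <cassert>
#include <cstdint>

// Kind (1 bit) | Constness (1 bit) | Attributes (3 bits) occupy the low byte.
// The dictionary-mode fields starting at bit 5 (3-bit cell type, then the
// enumeration index) must be zero for the byte encoding to be exact.
constexpr int kKindShift = 0;
constexpr int kConstnessShift = 1;
constexpr int kAttributesShift = 2;
constexpr int kCellTypeShift = 5;
constexpr int kCellTypeBits = 3;

constexpr uint32_t Encode(uint32_t kind, uint32_t constness, uint32_t attrs,
                          uint32_t cell_type, uint32_t dict_index) {
  return (kind << kKindShift) | (constness << kConstnessShift) |
         (attrs << kAttributesShift) | (cell_type << kCellTypeShift) |
         (dict_index << (kCellTypeShift + kCellTypeBits));
}

uint8_t ToByte(uint32_t details) {
  // Mirrors the DCHECKs in PropertyDetails::ToByte: nothing above the low
  // fields may be set (location == kField, cell type == kNoCell, index == 0).
  assert((details >> kCellTypeShift) == 0);
  return static_cast<uint8_t>(details);
}

uint32_t FromByte(uint8_t encoded) {
  // Zero-extension restores kField, kNoCell and index 0, as intended.
  return encoded;
}

int main() {
  uint32_t details = Encode(1, 1, 0b101, /*cell_type=kNoCell*/ 0, 0);
  assert(FromByte(ToByte(details)) == details);
  return 0;
}
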
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index 10fe0f834e..76b8d92dd8 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -7,6 +7,7 @@
#include <memory>
+#include "include/v8-script.h"
#include "src/base/export-template.h"
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
@@ -22,6 +23,10 @@ namespace internal {
class FunctionLiteral;
+namespace wasm {
+class NativeModule;
+} // namespace wasm
+
#include "torque-generated/src/objects/script-tq.inc"
// Script describes a script which has been added to the VM.
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 583ca8dccf..1b8c56386f 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -7,6 +7,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/debug-objects-inl.h"
@@ -92,8 +93,6 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledData)
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithoutPreparseData)
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseData)
-TQ_OBJECT_CONSTRUCTORS_IMPL(BaselineData)
-
TQ_OBJECT_CONSTRUCTORS_IMPL(InterpreterData)
ACCESSORS(InterpreterData, raw_interpreter_trampoline, CodeT,
@@ -130,13 +129,37 @@ DEF_ACQUIRE_GETTER(SharedFunctionInfo,
return value;
}
-RENAME_UINT16_TORQUE_ACCESSORS(SharedFunctionInfo,
- internal_formal_parameter_count,
- formal_parameter_count)
+uint16_t SharedFunctionInfo::internal_formal_parameter_count_with_receiver()
+ const {
+ const uint16_t param_count = TorqueGeneratedClass::formal_parameter_count();
+ if (param_count == kDontAdaptArgumentsSentinel) return param_count;
+ return param_count + (kJSArgcIncludesReceiver ? 0 : 1);
+}
+
+uint16_t SharedFunctionInfo::internal_formal_parameter_count_without_receiver()
+ const {
+ const uint16_t param_count = TorqueGeneratedClass::formal_parameter_count();
+ if (param_count == kDontAdaptArgumentsSentinel) return param_count;
+ return param_count - kJSArgcReceiverSlots;
+}
+
+void SharedFunctionInfo::set_internal_formal_parameter_count(int value) {
+ DCHECK_EQ(value, static_cast<uint16_t>(value));
+ DCHECK_GE(value, kJSArgcReceiverSlots);
+ TorqueGeneratedClass::set_formal_parameter_count(value);
+}
+
RENAME_UINT16_TORQUE_ACCESSORS(SharedFunctionInfo, raw_function_token_offset,
function_token_offset)
-IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
+RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
+int32_t SharedFunctionInfo::relaxed_flags() const {
+ return flags(kRelaxedLoad);
+}
+void SharedFunctionInfo::set_relaxed_flags(int32_t flags) {
+ return set_flags(flags, kRelaxedStore);
+}
+
UINT8_ACCESSORS(SharedFunctionInfo, flags2, kFlags2Offset)
bool SharedFunctionInfo::HasSharedName() const {
@@ -253,34 +276,36 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2,
has_static_private_methods_or_accessors,
SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, syntax_kind,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, syntax_kind,
SharedFunctionInfo::FunctionSyntaxKindBits)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, allows_lazy_compilation,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, allows_lazy_compilation,
SharedFunctionInfo::AllowLazyCompilationBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, has_duplicate_parameters,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, has_duplicate_parameters,
SharedFunctionInfo::HasDuplicateParametersBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, native,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, native,
SharedFunctionInfo::IsNativeBit)
#if V8_ENABLE_WEBASSEMBLY
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_asm_wasm_broken,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, is_asm_wasm_broken,
SharedFunctionInfo::IsAsmWasmBrokenBit)
#endif // V8_ENABLE_WEBASSEMBLY
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags,
requires_instance_members_initializer,
SharedFunctionInfo::RequiresInstanceMembersInitializerBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, name_should_print_as_anonymous,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags,
+ name_should_print_as_anonymous,
SharedFunctionInfo::NameShouldPrintAsAnonymousBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, has_reported_binary_coverage,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags,
+ has_reported_binary_coverage,
SharedFunctionInfo::HasReportedBinaryCoverageBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_toplevel,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, is_toplevel,
SharedFunctionInfo::IsTopLevelBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, properties_are_final,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, properties_are_final,
SharedFunctionInfo::PropertiesAreFinalBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags,
private_name_lookup_skips_outer_class,
SharedFunctionInfo::PrivateNameLookupSkipsOuterClassBit)
@@ -289,12 +314,12 @@ bool SharedFunctionInfo::optimization_disabled() const {
}
BailoutReason SharedFunctionInfo::disable_optimization_reason() const {
- return DisabledOptimizationReasonBits::decode(flags());
+ return DisabledOptimizationReasonBits::decode(flags(kRelaxedLoad));
}
LanguageMode SharedFunctionInfo::language_mode() const {
STATIC_ASSERT(LanguageModeSize == 2);
- return construct_language_mode(IsStrictBit::decode(flags()));
+ return construct_language_mode(IsStrictBit::decode(flags(kRelaxedLoad)));
}
void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
@@ -302,22 +327,22 @@ void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
// We only allow language mode transitions that set the same language mode
// again or go up in the chain:
DCHECK(is_sloppy(this->language_mode()) || is_strict(language_mode));
- int hints = flags();
+ int hints = flags(kRelaxedLoad);
hints = IsStrictBit::update(hints, is_strict(language_mode));
- set_flags(hints);
+ set_flags(hints, kRelaxedStore);
UpdateFunctionMapIndex();
}
FunctionKind SharedFunctionInfo::kind() const {
STATIC_ASSERT(FunctionKindBits::kSize == kFunctionKindBitSize);
- return FunctionKindBits::decode(flags());
+ return FunctionKindBits::decode(flags(kRelaxedLoad));
}
void SharedFunctionInfo::set_kind(FunctionKind kind) {
- int hints = flags();
+ int hints = flags(kRelaxedLoad);
hints = FunctionKindBits::update(hints, kind);
hints = IsClassConstructorBit::update(hints, IsClassConstructor(kind));
- set_flags(hints);
+ set_flags(hints, kRelaxedStore);
UpdateFunctionMapIndex();
}
@@ -326,7 +351,7 @@ bool SharedFunctionInfo::is_wrapped() const {
}
bool SharedFunctionInfo::construct_as_builtin() const {
- return ConstructAsBuiltinBit::decode(flags());
+ return ConstructAsBuiltinBit::decode(flags(kRelaxedLoad));
}
void SharedFunctionInfo::CalculateConstructAsBuiltin() {
@@ -340,15 +365,15 @@ void SharedFunctionInfo::CalculateConstructAsBuiltin() {
uses_builtins_construct_stub = true;
}
- int f = flags();
+ int f = flags(kRelaxedLoad);
f = ConstructAsBuiltinBit::update(f, uses_builtins_construct_stub);
- set_flags(f);
+ set_flags(f, kRelaxedStore);
}
int SharedFunctionInfo::function_map_index() const {
// Note: Must be kept in sync with the FastNewClosure builtin.
- int index =
- Context::FIRST_FUNCTION_MAP_INDEX + FunctionMapIndexBits::decode(flags());
+ int index = Context::FIRST_FUNCTION_MAP_INDEX +
+ FunctionMapIndexBits::decode(flags(kRelaxedLoad));
DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
return index;
}
@@ -359,7 +384,8 @@ void SharedFunctionInfo::set_function_map_index(int index) {
DCHECK_LE(Context::FIRST_FUNCTION_MAP_INDEX, index);
DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
index -= Context::FIRST_FUNCTION_MAP_INDEX;
- set_flags(FunctionMapIndexBits::update(flags(), index));
+ set_flags(FunctionMapIndexBits::update(flags(kRelaxedLoad), index),
+ kRelaxedStore);
}
void SharedFunctionInfo::clear_padding() {
@@ -378,7 +404,12 @@ void SharedFunctionInfo::DontAdaptArguments() {
// TODO(leszeks): Revise this DCHECK now that the code field is gone.
DCHECK(!HasWasmExportedFunctionData());
#endif // V8_ENABLE_WEBASSEMBLY
- set_internal_formal_parameter_count(kDontAdaptArgumentsSentinel);
+ TorqueGeneratedClass::set_formal_parameter_count(kDontAdaptArgumentsSentinel);
+}
+
+bool SharedFunctionInfo::IsDontAdaptArguments() const {
+ return TorqueGeneratedClass::formal_parameter_count() ==
+ kDontAdaptArgumentsSentinel;
}
bool SharedFunctionInfo::IsInterpreted() const { return HasBytecodeArray(); }
@@ -484,8 +515,8 @@ IsCompiledScope SharedFunctionInfo::is_compiled_scope(IsolateT* isolate) const {
IsCompiledScope::IsCompiledScope(const SharedFunctionInfo shared,
Isolate* isolate)
: is_compiled_(shared.is_compiled()) {
- if (shared.HasBaselineData()) {
- retain_code_ = handle(shared.baseline_data(), isolate);
+ if (shared.HasBaselineCode()) {
+ retain_code_ = handle(shared.baseline_code(kAcquireLoad), isolate);
} else if (shared.HasBytecodeArray()) {
retain_code_ = handle(shared.GetBytecodeArray(isolate), isolate);
} else {
@@ -498,8 +529,9 @@ IsCompiledScope::IsCompiledScope(const SharedFunctionInfo shared,
IsCompiledScope::IsCompiledScope(const SharedFunctionInfo shared,
LocalIsolate* isolate)
: is_compiled_(shared.is_compiled()) {
- if (shared.HasBaselineData()) {
- retain_code_ = isolate->heap()->NewPersistentHandle(shared.baseline_data());
+ if (shared.HasBaselineCode()) {
+ retain_code_ = isolate->heap()->NewPersistentHandle(
+ shared.baseline_code(kAcquireLoad));
} else if (shared.HasBytecodeArray()) {
retain_code_ =
isolate->heap()->NewPersistentHandle(shared.GetBytecodeArray(isolate));
@@ -530,8 +562,7 @@ FunctionTemplateInfo SharedFunctionInfo::get_api_func_data() const {
bool SharedFunctionInfo::HasBytecodeArray() const {
Object data = function_data(kAcquireLoad);
- return data.IsBytecodeArray() || data.IsInterpreterData() ||
- data.IsBaselineData();
+ return data.IsBytecodeArray() || data.IsInterpreterData() || data.IsCodeT();
}
template <typename IsolateT>
@@ -547,40 +578,14 @@ BytecodeArray SharedFunctionInfo::GetBytecodeArray(IsolateT* isolate) const {
return GetActiveBytecodeArray();
}
-DEF_GETTER(BaselineData, baseline_code, Code) {
- return FromCodeT(TorqueGeneratedClass::baseline_code(cage_base));
-}
-
-void BaselineData::set_baseline_code(Code code, WriteBarrierMode mode) {
- return TorqueGeneratedClass::set_baseline_code(ToCodeT(code), mode);
-}
-
-BytecodeArray BaselineData::GetActiveBytecodeArray() const {
- Object data = this->data();
- if (data.IsBytecodeArray()) {
- return BytecodeArray::cast(data);
- } else {
- DCHECK(data.IsInterpreterData());
- return InterpreterData::cast(data).bytecode_array();
- }
-}
-
-void BaselineData::SetActiveBytecodeArray(BytecodeArray bytecode) {
- Object data = this->data();
- if (data.IsBytecodeArray()) {
- set_data(bytecode);
- } else {
- DCHECK(data.IsInterpreterData());
- InterpreterData::cast(data).set_bytecode_array(bytecode);
- }
-}
-
BytecodeArray SharedFunctionInfo::GetActiveBytecodeArray() const {
Object data = function_data(kAcquireLoad);
+ if (data.IsCodeT()) {
+ Code baseline_code = FromCodeT(CodeT::cast(data));
+ data = baseline_code.bytecode_or_interpreter_data();
+ }
if (data.IsBytecodeArray()) {
return BytecodeArray::cast(data);
- } else if (data.IsBaselineData()) {
- return baseline_data().GetActiveBytecodeArray();
} else {
DCHECK(data.IsInterpreterData());
return InterpreterData::cast(data).bytecode_array();
@@ -588,11 +593,13 @@ BytecodeArray SharedFunctionInfo::GetActiveBytecodeArray() const {
}
void SharedFunctionInfo::SetActiveBytecodeArray(BytecodeArray bytecode) {
+ // We don't allow setting the active bytecode array on baseline-optimized
+ // functions. They should have been flushed earlier.
+ DCHECK(!HasBaselineCode());
+
Object data = function_data(kAcquireLoad);
if (data.IsBytecodeArray()) {
set_function_data(bytecode, kReleaseStore);
- } else if (data.IsBaselineData()) {
- baseline_data().SetActiveBytecodeArray(bytecode);
} else {
DCHECK(data.IsInterpreterData());
interpreter_data().set_bytecode_array(bytecode);
@@ -618,12 +625,13 @@ bool SharedFunctionInfo::ShouldFlushCode(
// check if it is old. Note, this is done this way since this function can be
// called by the concurrent marker.
Object data = function_data(kAcquireLoad);
- if (data.IsBaselineData()) {
+ if (data.IsCodeT()) {
+ Code baseline_code = FromCodeT(CodeT::cast(data));
+ DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
// If baseline code flushing isn't enabled and we have baseline data on SFI
// we cannot flush baseline / bytecode.
if (!IsBaselineCodeFlushingEnabled(code_flush_mode)) return false;
- data =
- ACQUIRE_READ_FIELD(BaselineData::cast(data), BaselineData::kDataOffset);
+ data = baseline_code.bytecode_or_interpreter_data();
} else if (!IsByteCodeFlushingEnabled(code_flush_mode)) {
// If bytecode flushing isn't enabled and there is no baseline code there is
// nothing to flush.
@@ -645,40 +653,56 @@ Code SharedFunctionInfo::InterpreterTrampoline() const {
bool SharedFunctionInfo::HasInterpreterData() const {
Object data = function_data(kAcquireLoad);
- if (data.IsBaselineData()) data = BaselineData::cast(data).data();
+ if (data.IsCodeT()) {
+ Code baseline_code = FromCodeT(CodeT::cast(data));
+ DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
+ data = baseline_code.bytecode_or_interpreter_data();
+ }
return data.IsInterpreterData();
}
InterpreterData SharedFunctionInfo::interpreter_data() const {
DCHECK(HasInterpreterData());
Object data = function_data(kAcquireLoad);
- if (data.IsBaselineData()) data = BaselineData::cast(data).data();
+ if (data.IsCodeT()) {
+ Code baseline_code = FromCodeT(CodeT::cast(data));
+ DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
+ data = baseline_code.bytecode_or_interpreter_data();
+ }
return InterpreterData::cast(data);
}
void SharedFunctionInfo::set_interpreter_data(
InterpreterData interpreter_data) {
DCHECK(FLAG_interpreted_frames_native_stack);
- DCHECK(!HasBaselineData());
+ DCHECK(!HasBaselineCode());
set_function_data(interpreter_data, kReleaseStore);
}
-bool SharedFunctionInfo::HasBaselineData() const {
- return function_data(kAcquireLoad).IsBaselineData();
+bool SharedFunctionInfo::HasBaselineCode() const {
+ Object data = function_data(kAcquireLoad);
+ if (data.IsCodeT()) {
+ DCHECK_EQ(FromCodeT(CodeT::cast(data)).kind(), CodeKind::BASELINE);
+ return true;
+ }
+ return false;
}
-BaselineData SharedFunctionInfo::baseline_data() const {
- DCHECK(HasBaselineData());
- return BaselineData::cast(function_data(kAcquireLoad));
+Code SharedFunctionInfo::baseline_code(AcquireLoadTag) const {
+ DCHECK(HasBaselineCode());
+ return FromCodeT(CodeT::cast(function_data(kAcquireLoad)));
}
-void SharedFunctionInfo::set_baseline_data(BaselineData baseline_data) {
- set_function_data(baseline_data, kReleaseStore);
+void SharedFunctionInfo::set_baseline_code(Code baseline_code,
+ ReleaseStoreTag) {
+ DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
+ set_function_data(ToCodeT(baseline_code), kReleaseStore);
}
-void SharedFunctionInfo::flush_baseline_data() {
- DCHECK(HasBaselineData());
- set_function_data(baseline_data().data(), kReleaseStore);
+void SharedFunctionInfo::FlushBaselineCode() {
+ DCHECK(HasBaselineCode());
+ set_function_data(baseline_code(kAcquireLoad).bytecode_or_interpreter_data(),
+ kReleaseStore);
}
#if V8_ENABLE_WEBASSEMBLY
@@ -898,11 +922,11 @@ bool SharedFunctionInfo::CanDiscardCompiled() const {
if (HasAsmWasmData()) return true;
#endif // V8_ENABLE_WEBASSEMBLY
return HasBytecodeArray() || HasUncompiledDataWithPreparseData() ||
- HasBaselineData();
+ HasBaselineCode();
}
bool SharedFunctionInfo::is_class_constructor() const {
- return IsClassConstructorBit::decode(flags());
+ return IsClassConstructorBit::decode(flags(kRelaxedLoad));
}
void SharedFunctionInfo::set_are_properties_final(bool value) {
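
With BaselineData removed, the function_data slot now holds baseline Code (as CodeT) directly, and the bytecode or interpreter data is reached through Code::bytecode_or_interpreter_data(). Below is a simplified model of the GetActiveBytecodeArray() dispatch, using std::variant in place of the tagged heap-object type checks; all types and fields in the sketch are stand-ins, not the real V8 classes.

#include <variant>

struct BytecodeArray {};
struct InterpreterData { BytecodeArray bytecode; };
struct BaselineCode {  // models a CodeT of kind BASELINE
  std::variant<BytecodeArray, InterpreterData> bytecode_or_interpreter_data;
};
using FunctionData =
    std::variant<BytecodeArray, InterpreterData, BaselineCode>;

BytecodeArray GetActiveBytecodeArray(const FunctionData& data) {
  // Baseline code is unwrapped first, like the new IsCodeT() branch.
  if (const auto* baseline = std::get_if<BaselineCode>(&data)) {
    if (const auto* interp = std::get_if<InterpreterData>(
            &baseline->bytecode_or_interpreter_data)) {
      return interp->bytecode;
    }
    return std::get<BytecodeArray>(baseline->bytecode_or_interpreter_data);
  }
  if (const auto* interp = std::get_if<InterpreterData>(&data)) {
    return interp->bytecode;
  }
  return std::get<BytecodeArray>(data);
}

int main() {
  FunctionData data = BaselineCode{InterpreterData{}};
  (void)GetActiveBytecodeArray(data);
  return 0;
}
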
diff --git a/deps/v8/src/objects/shared-function-info.cc b/deps/v8/src/objects/shared-function-info.cc
index 22e98a140c..4354a2af28 100644
--- a/deps/v8/src/objects/shared-function-info.cc
+++ b/deps/v8/src/objects/shared-function-info.cc
@@ -8,6 +8,7 @@
#include "src/ast/scopes.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
+#include "src/common/globals.h"
#include "src/diagnostics/code-tracer.h"
#include "src/objects/shared-function-info-inl.h"
#include "src/strings/string-builder-inl.h"
@@ -52,13 +53,13 @@ void SharedFunctionInfo::Init(ReadOnlyRoots ro_roots, int unique_id) {
// Set integer fields (smi or int, depending on the architecture).
set_length(0);
- set_internal_formal_parameter_count(0);
+ set_internal_formal_parameter_count(JSParameterCount(0));
set_expected_nof_properties(0);
set_raw_function_token_offset(0);
// All flags default to false or 0, except ConstructAsBuiltinBit just because
// we're using the kIllegal builtin.
- set_flags(ConstructAsBuiltinBit::encode(true));
+ set_flags(ConstructAsBuiltinBit::encode(true), kRelaxedStore);
set_flags2(0);
UpdateFunctionMapIndex();
@@ -84,10 +85,10 @@ Code SharedFunctionInfo::GetCode() const {
DCHECK(HasBytecodeArray());
return isolate->builtins()->code(Builtin::kInterpreterEntryTrampoline);
}
- if (data.IsBaselineData()) {
- // Having BaselineData means we are a compiled, baseline function.
- DCHECK(HasBaselineData());
- return baseline_data().baseline_code();
+ if (data.IsCodeT()) {
+ // Having baseline Code means we are a compiled, baseline function.
+ DCHECK(HasBaselineCode());
+ return FromCodeT(CodeT::cast(data));
}
#if V8_ENABLE_WEBASSEMBLY
if (data.IsAsmWasmData()) {
@@ -435,7 +436,8 @@ std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
DCHECK_NE(reason, BailoutReason::kNoReason);
- set_flags(DisabledOptimizationReasonBits::update(flags(), reason));
+ set_flags(DisabledOptimizationReasonBits::update(flags(kRelaxedLoad), reason),
+ kRelaxedStore);
// Code should be the lazy compilation stub or else interpreted.
Isolate* isolate = GetIsolate();
DCHECK(abstract_code(isolate).kind() == CodeKind::INTERPRETED_FUNCTION ||
@@ -459,7 +461,8 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
// When adding fields here, make sure DeclarationScope::AnalyzePartially is
// updated accordingly.
- shared_info->set_internal_formal_parameter_count(lit->parameter_count());
+ shared_info->set_internal_formal_parameter_count(
+ JSParameterCount(lit->parameter_count()));
shared_info->SetFunctionTokenPosition(lit->function_token_position(),
lit->start_position());
shared_info->set_syntax_kind(lit->syntax_kind());
@@ -704,6 +707,7 @@ void SharedFunctionInfo::UninstallDebugBytecode(SharedFunctionInfo shared,
isolate->shared_function_info_access());
DebugInfo debug_info = shared.GetDebugInfo();
BytecodeArray original_bytecode_array = debug_info.OriginalBytecodeArray();
+ DCHECK(!shared.HasBaselineCode());
shared.SetActiveBytecodeArray(original_bytecode_array);
debug_info.set_original_bytecode_array(
ReadOnlyRoots(isolate).undefined_value(), kReleaseStore);
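
SharedFunctionInfo::Init and InitFromFunctionLiteral now store JSParameterCount(...) so the receiver slot is folded into the raw formal_parameter_count, and the new _with_receiver/_without_receiver accessors translate back on read. A small sketch of that arithmetic follows; JSParameterCount and the sentinel check mirror the patch, while the concrete sentinel value and the kJSArgcIncludesReceiver setting are illustrative (both are build-time details in V8).

#include <cstdint>

constexpr bool kJSArgcIncludesReceiver = true;            // assumed configuration
constexpr uint16_t kDontAdaptArgumentsSentinel = 0xFFFF;  // illustrative value
constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

// What gets stored for a function declaring |n| parameters.
constexpr uint16_t JSParameterCount(int n) {
  return static_cast<uint16_t>(n + kJSArgcReceiverSlots);
}

constexpr uint16_t WithReceiver(uint16_t stored) {
  if (stored == kDontAdaptArgumentsSentinel) return stored;
  return static_cast<uint16_t>(stored + (kJSArgcIncludesReceiver ? 0 : 1));
}

constexpr uint16_t WithoutReceiver(uint16_t stored) {
  if (stored == kDontAdaptArgumentsSentinel) return stored;
  return static_cast<uint16_t>(stored - kJSArgcReceiverSlots);
}

// A function declaring two parameters round-trips as expected.
static_assert(WithoutReceiver(JSParameterCount(2)) == 2, "");
static_assert(WithReceiver(JSParameterCount(2)) == 3, "");

int main() { return 0; }
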
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index fd19f90165..598ccfd883 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -10,6 +10,7 @@
#include "src/base/bit-field.h"
#include "src/builtins/builtins.h"
#include "src/codegen/bailout-reason.h"
+#include "src/common/globals.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/function-kind.h"
#include "src/objects/function-syntax-kind.h"
@@ -154,16 +155,6 @@ class InterpreterData
TQ_OBJECT_CONSTRUCTORS(InterpreterData)
};
-class BaselineData : public TorqueGeneratedBaselineData<BaselineData, Struct> {
- public:
- inline BytecodeArray GetActiveBytecodeArray() const;
- inline void SetActiveBytecodeArray(BytecodeArray bytecode);
-
- DECL_ACCESSORS(baseline_code, Code)
-
- TQ_OBJECT_CONSTRUCTORS(BaselineData)
-};
-
// SharedFunctionInfo describes the JSFunction information that can be
// shared by multiple instances of the function.
class SharedFunctionInfo
@@ -275,8 +266,12 @@ class SharedFunctionInfo
// [internal formal parameter count]: The declared number of parameters.
// For subclass constructors, also includes new.target.
- // The size of function's frame is internal_formal_parameter_count + 1.
- DECL_UINT16_ACCESSORS(internal_formal_parameter_count)
+ // The size of function's frame is
+ // internal_formal_parameter_count_with_receiver.
+ inline void set_internal_formal_parameter_count(int value);
+ inline uint16_t internal_formal_parameter_count_with_receiver() const;
+ inline uint16_t internal_formal_parameter_count_without_receiver() const;
+
private:
using TorqueGeneratedSharedFunctionInfo::formal_parameter_count;
using TorqueGeneratedSharedFunctionInfo::set_formal_parameter_count;
@@ -285,6 +280,7 @@ class SharedFunctionInfo
// Set the formal parameter count so the function code will be
// called without using argument adaptor frames.
inline void DontAdaptArguments();
+ inline bool IsDontAdaptArguments() const;
// [function data]: This field holds some additional data for function.
// Currently it has one of:
@@ -314,10 +310,10 @@ class SharedFunctionInfo
inline bool HasInterpreterData() const;
inline InterpreterData interpreter_data() const;
inline void set_interpreter_data(InterpreterData interpreter_data);
- inline bool HasBaselineData() const;
- inline BaselineData baseline_data() const;
- inline void set_baseline_data(BaselineData Baseline_data);
- inline void flush_baseline_data();
+ inline bool HasBaselineCode() const;
+ inline Code baseline_code(AcquireLoadTag) const;
+ inline void set_baseline_code(Code baseline_code, ReleaseStoreTag);
+ inline void FlushBaselineCode();
inline BytecodeArray GetActiveBytecodeArray() const;
inline void SetActiveBytecodeArray(BytecodeArray bytecode);
@@ -414,7 +410,7 @@ class SharedFunctionInfo
inline bool HasSharedName() const;
// [flags] Bit field containing various flags about the function.
- DECL_INT32_ACCESSORS(flags)
+ DECL_RELAXED_INT32_ACCESSORS(flags)
DECL_UINT8_ACCESSORS(flags2)
// True if the outer class scope contains a private brand for
@@ -673,6 +669,10 @@ class SharedFunctionInfo
inline uint16_t get_property_estimate_from_literal(FunctionLiteral* literal);
+ // For ease of use of the BITFIELD macro.
+ inline int32_t relaxed_flags() const;
+ inline void set_relaxed_flags(int32_t flags);
+
template <typename Impl>
friend class FactoryBase;
friend class V8HeapExplorer;
diff --git a/deps/v8/src/objects/shared-function-info.tq b/deps/v8/src/objects/shared-function-info.tq
index 0b0930b6b4..4f80f568dc 100644
--- a/deps/v8/src/objects/shared-function-info.tq
+++ b/deps/v8/src/objects/shared-function-info.tq
@@ -14,13 +14,6 @@ extern class InterpreterData extends Struct {
@ifnot(V8_EXTERNAL_CODE_SPACE) interpreter_trampoline: Code;
}
-@generatePrint
-extern class BaselineData extends Struct {
- @if(V8_EXTERNAL_CODE_SPACE) baseline_code: CodeDataContainer;
- @ifnot(V8_EXTERNAL_CODE_SPACE) baseline_code: Code;
- data: BytecodeArray|InterpreterData;
-}
-
type FunctionKind extends uint8 constexpr 'FunctionKind';
type FunctionSyntaxKind extends uint8 constexpr 'FunctionSyntaxKind';
type BailoutReason extends uint8 constexpr 'BailoutReason';
@@ -63,11 +56,17 @@ class SharedFunctionInfo extends HeapObject {
name_or_scope_info: String|NoSharedNameSentinel|ScopeInfo;
outer_scope_info_or_feedback_metadata: HeapObject;
script_or_debug_info: Script|DebugInfo|Undefined;
- // [length]: The function length - usually the number of declared parameters.
+ // [length]: The function length - usually the number of declared parameters
+ // (always without the receiver).
// Use up to 2^16-2 parameters (16 bits of values, where one is reserved for
// kDontAdaptArgumentsSentinel). The value is only reliable when the function
// has been compiled.
length: int16;
+ // [formal_parameter_count]: The number of declared parameters (or the special
+ // value kDontAdaptArgumentsSentinel to indicate that arguments are passed
+ // unaltered).
+ // In contrast to [length], formal_parameter_count includes the receiver if
+ // kJSArgcIncludesReceiver is true.
formal_parameter_count: uint16;
function_token_offset: uint16;
// [expected_nof_properties]: Expected number of properties for the
@@ -84,6 +83,40 @@ class SharedFunctionInfo extends HeapObject {
@if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32;
}
+const kDontAdaptArgumentsSentinel: constexpr int32
+ generates 'kDontAdaptArgumentsSentinel';
+const kJSArgcIncludesReceiver:
+ constexpr bool generates 'kJSArgcIncludesReceiver';
+@export
+macro LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(
+ sfi: SharedFunctionInfo): uint16 {
+ let formalParameterCount = sfi.formal_parameter_count;
+ if (kJSArgcIncludesReceiver) {
+ if (Convert<int32>(formalParameterCount) != kDontAdaptArgumentsSentinel) {
+ formalParameterCount = Convert<uint16>(formalParameterCount - 1);
+ }
+ }
+ return formalParameterCount;
+}
+
+@export
+macro LoadSharedFunctionInfoFormalParameterCountWithReceiver(
+ sfi: SharedFunctionInfo): uint16 {
+ let formalParameterCount = sfi.formal_parameter_count;
+ if (!kJSArgcIncludesReceiver) {
+ if (Convert<int32>(formalParameterCount) != kDontAdaptArgumentsSentinel) {
+ formalParameterCount = Convert<uint16>(formalParameterCount + 1);
+ }
+ }
+ return formalParameterCount;
+}
+
+@export
+macro IsSharedFunctionInfoDontAdaptArguments(sfi: SharedFunctionInfo): bool {
+ const formalParameterCount = sfi.formal_parameter_count;
+ return Convert<int32>(formalParameterCount) == kDontAdaptArgumentsSentinel;
+}
+
@abstract
@export
@customCppClass
diff --git a/deps/v8/src/objects/tagged-field.h b/deps/v8/src/objects/tagged-field.h
index 7faf9e9ac9..d9fc0bb102 100644
--- a/deps/v8/src/objects/tagged-field.h
+++ b/deps/v8/src/objects/tagged-field.h
@@ -49,7 +49,7 @@ class TaggedField : public AllStatic {
int offset = 0);
static inline void Relaxed_Store(HeapObject host, T value);
- static void Relaxed_Store(HeapObject host, int offset, T value);
+ static inline void Relaxed_Store(HeapObject host, int offset, T value);
static inline T Acquire_Load(HeapObject host, int offset = 0);
static inline T Acquire_Load_No_Unpack(PtrComprCageBase cage_base,
diff --git a/deps/v8/src/objects/tagged-impl.h b/deps/v8/src/objects/tagged-impl.h
index e7278a1245..6b01c6fe62 100644
--- a/deps/v8/src/objects/tagged-impl.h
+++ b/deps/v8/src/objects/tagged-impl.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_TAGGED_IMPL_H_
#include "include/v8-internal.h"
-#include "include/v8.h"
#include "src/common/globals.h"
namespace v8 {
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index 53bb0cf927..a84cf4e2c4 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -6,8 +6,10 @@
#include <type_traits>
+#include "include/v8-maybe.h"
#include "include/v8-value-serializer-version.h"
-#include "include/v8.h"
+#include "include/v8-value-serializer.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/base/logging.h"
#include "src/base/platform/wrappers.h"
diff --git a/deps/v8/src/objects/value-serializer.h b/deps/v8/src/objects/value-serializer.h
index 8a381d1691..c6363e67c6 100644
--- a/deps/v8/src/objects/value-serializer.h
+++ b/deps/v8/src/objects/value-serializer.h
@@ -8,7 +8,7 @@
#include <cstdint>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-value-serializer.h"
#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
#include "src/base/strings.h"
diff --git a/deps/v8/src/objects/visitors.h b/deps/v8/src/objects/visitors.h
index a784cec756..d527cb0a9a 100644
--- a/deps/v8/src/objects/visitors.h
+++ b/deps/v8/src/objects/visitors.h
@@ -168,7 +168,7 @@ class ObjectVisitor {
virtual void VisitOffHeapTarget(Code host, RelocInfo* rinfo) {}
// Visits the relocation info using the given iterator.
- virtual void VisitRelocInfo(RelocIterator* it);
+ void VisitRelocInfo(RelocIterator* it);
// Visits the object's map pointer, decoding as necessary
virtual void VisitMapPointer(HeapObject host) { UNREACHABLE(); }
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index c6bcb221ea..57153c345b 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -9,7 +9,6 @@
#include <memory>
#include <vector>
-#include "include/v8.h"
#include "src/base/bit-field.h"
#include "src/base/export-template.h"
#include "src/base/logging.h"
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 108b11edc8..ef2fb7ef3e 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -27,12 +27,15 @@
#include "src/parsing/parse-info.h"
#include "src/parsing/scanner.h"
#include "src/parsing/token.h"
+#include "src/regexp/regexp.h"
#include "src/utils/pointer-with-payload.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
namespace internal {
+class PreParserIdentifier;
+
enum FunctionNameValidity {
kFunctionNameIsStrictReserved,
kSkipFunctionNameCheck,
@@ -1074,22 +1077,24 @@ class ParserBase {
}
// Report syntax errors.
- V8_NOINLINE void ReportMessage(MessageTemplate message) {
- Scanner::Location source_location = scanner()->location();
- impl()->ReportMessageAt(source_location, message,
- static_cast<const char*>(nullptr));
+ template <typename... Ts>
+ V8_NOINLINE void ReportMessage(MessageTemplate message, const Ts&... args) {
+ ReportMessageAt(scanner()->location(), message, args...);
}
- template <typename T>
- V8_NOINLINE void ReportMessage(MessageTemplate message, T arg) {
- Scanner::Location source_location = scanner()->location();
- impl()->ReportMessageAt(source_location, message, arg);
+ template <typename... Ts>
+ V8_NOINLINE void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate message, const Ts&... args) {
+ impl()->pending_error_handler()->ReportMessageAt(
+ source_location.beg_pos, source_location.end_pos, message, args...);
+ scanner()->set_parser_error();
}
- V8_NOINLINE void ReportMessageAt(Scanner::Location location,
- MessageTemplate message) {
- impl()->ReportMessageAt(location, message,
- static_cast<const char*>(nullptr));
+ V8_NOINLINE void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate message,
+ const PreParserIdentifier& arg0) {
+ ReportMessageAt(source_location, message,
+ impl()->PreParserIdentifierToAstRawString(arg0));
}
V8_NOINLINE void ReportUnexpectedToken(Token::Value token);
@@ -1122,6 +1127,12 @@ class ParserBase {
}
V8_INLINE IdentifierT ParseAndClassifyIdentifier(Token::Value token);
+
+ // Similar logic to ParseAndClassifyIdentifier but the identifier is
+ // already parsed in prop_info. Returns false if this is an invalid
+ // identifier or an invalid use of the "arguments" keyword.
+ V8_INLINE bool ClassifyPropertyIdentifier(Token::Value token,
+ ParsePropertyInfo* prop_info);
// Parses an identifier or a strict mode future reserved word. Allows passing
// in function_kind for the case of parsing the identifier in a function
// expression, where the relevant "function_kind" bit is of the function being
@@ -1140,6 +1151,11 @@ class ParserBase {
ExpressionT ParsePropertyOrPrivatePropertyName();
+ const AstRawString* GetNextSymbolForRegExpLiteral() const {
+ return scanner()->NextSymbol(ast_value_factory());
+ }
+ bool ValidateRegExpLiteral(const AstRawString* pattern, RegExpFlags flags,
+ RegExpError* regexp_error);
ExpressionT ParseRegExpLiteral();
ExpressionT ParseBindingPattern();
@@ -1634,8 +1650,39 @@ void ParserBase<Impl>::ReportUnexpectedToken(Token::Value token) {
}
template <typename Impl>
+bool ParserBase<Impl>::ClassifyPropertyIdentifier(
+ Token::Value next, ParsePropertyInfo* prop_info) {
+ // Updates made here must be reflected on ParseAndClassifyIdentifier.
+ if (V8_LIKELY(base::IsInRange(next, Token::IDENTIFIER, Token::ASYNC))) {
+ if (V8_UNLIKELY(impl()->IsArguments(prop_info->name) &&
+ scope()->ShouldBanArguments())) {
+ ReportMessage(
+ MessageTemplate::kArgumentsDisallowedInInitializerAndStaticBlock);
+ return false;
+ }
+ return true;
+ }
+
+ if (!Token::IsValidIdentifier(next, language_mode(), is_generator(),
+ is_await_as_identifier_disallowed())) {
+ ReportUnexpectedToken(next);
+ return false;
+ }
+
+ DCHECK(!prop_info->is_computed_name);
+
+ if (next == Token::AWAIT) {
+ DCHECK(!is_async_function());
+ expression_scope()->RecordAsyncArrowParametersError(
+ scanner()->peek_location(), MessageTemplate::kAwaitBindingIdentifier);
+ }
+ return true;
+}
+
+template <typename Impl>
typename ParserBase<Impl>::IdentifierT
ParserBase<Impl>::ParseAndClassifyIdentifier(Token::Value next) {
+ // Updates made here must be reflected on ClassifyPropertyIdentifier.
DCHECK_EQ(scanner()->current_token(), next);
if (V8_LIKELY(base::IsInRange(next, Token::IDENTIFIER, Token::ASYNC))) {
IdentifierT name = impl()->GetIdentifier();
@@ -1746,6 +1793,25 @@ ParserBase<Impl>::ParsePropertyOrPrivatePropertyName() {
}
template <typename Impl>
+bool ParserBase<Impl>::ValidateRegExpLiteral(const AstRawString* pattern,
+ RegExpFlags flags,
+ RegExpError* regexp_error) {
+ // TODO(jgruber): If already validated in the preparser, skip validation in
+ // the parser.
+ DisallowGarbageCollection no_gc;
+ const unsigned char* d = pattern->raw_data();
+ if (pattern->is_one_byte()) {
+ return RegExp::VerifySyntax(zone(), stack_limit(),
+ static_cast<const uint8_t*>(d),
+ pattern->length(), flags, regexp_error, no_gc);
+ } else {
+ return RegExp::VerifySyntax(zone(), stack_limit(),
+ reinterpret_cast<const uint16_t*>(d),
+ pattern->length(), flags, regexp_error, no_gc);
+ }
+}
+
+template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral() {
int pos = peek_position();
if (!scanner()->ScanRegExpPattern()) {
@@ -1754,15 +1820,22 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral() {
return impl()->FailureExpression();
}
- IdentifierT js_pattern = impl()->GetNextSymbol();
- Maybe<int> flags = scanner()->ScanRegExpFlags();
- if (flags.IsNothing()) {
+ const AstRawString* js_pattern = GetNextSymbolForRegExpLiteral();
+ base::Optional<RegExpFlags> flags = scanner()->ScanRegExpFlags();
+ if (!flags.has_value()) {
Next();
ReportMessage(MessageTemplate::kMalformedRegExpFlags);
return impl()->FailureExpression();
}
Next();
- return factory()->NewRegExpLiteral(js_pattern, flags.FromJust(), pos);
+ RegExpError regexp_error;
+ if (!ValidateRegExpLiteral(js_pattern, flags.value(), &regexp_error)) {
+ if (RegExpErrorIsStackOverflow(regexp_error)) set_stack_overflow();
+ ReportMessage(MessageTemplate::kMalformedRegExp, js_pattern,
+ RegExpErrorString(regexp_error));
+ return impl()->FailureExpression();
+ }
+ return factory()->NewRegExpLiteral(js_pattern, flags.value(), pos);
}
template <typename Impl>
@@ -2514,7 +2587,6 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
IdentifierT name = prop_info->name;
ParseFunctionFlags function_flags = prop_info->function_flags;
- ParsePropertyKind kind = prop_info->kind;
switch (prop_info->kind) {
case ParsePropertyKind::kSpread:
@@ -2562,19 +2634,10 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
// IdentifierReference Initializer?
DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
- if (!Token::IsValidIdentifier(name_token, language_mode(), is_generator(),
- is_await_as_identifier_disallowed())) {
- ReportUnexpectedToken(Next());
+ if (!ClassifyPropertyIdentifier(name_token, prop_info)) {
return impl()->NullLiteralProperty();
}
- DCHECK(!prop_info->is_computed_name);
-
- if (name_token == Token::AWAIT) {
- DCHECK(!is_async_function());
- expression_scope()->RecordAsyncArrowParametersError(
- next_loc, MessageTemplate::kAwaitBindingIdentifier);
- }
ExpressionT lhs =
impl()->ExpressionFromIdentifier(name, next_loc.beg_pos);
if (!IsAssignableIdentifier(lhs)) {
@@ -2637,7 +2700,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
case ParsePropertyKind::kAccessorGetter:
case ParsePropertyKind::kAccessorSetter: {
DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
- bool is_get = kind == ParsePropertyKind::kAccessorGetter;
+ bool is_get = prop_info->kind == ParsePropertyKind::kAccessorGetter;
expression_scope()->RecordPatternError(
Scanner::Location(next_loc.beg_pos, end_position()),
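
The dedicated one-argument ReportMessage/ReportMessageAt overloads above are replaced by variadic templates that forward their arguments straight to the pending error handler, which is what lets kMalformedRegExp carry both the pattern and the error string. A toy illustration of that forwarding shape (std::cout stands in for the error handler; none of this is the real parser API):

#include <iostream>

struct Location { int beg_pos; int end_pos; };

template <typename... Ts>
void ReportMessageAt(Location loc, const char* message, const Ts&... args) {
  // Stand-in for pending_error_handler()->ReportMessageAt(...).
  std::cout << loc.beg_pos << "-" << loc.end_pos << ": " << message;
  ((std::cout << ' ' << args), ...);
  std::cout << '\n';
}

template <typename... Ts>
void ReportMessage(const char* message, const Ts&... args) {
  ReportMessageAt(Location{0, 0}, message, args...);
}

int main() {
  ReportMessage("kMalformedRegExp", "(?<x>", "some regexp error");
  return 0;
}
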
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 6b50ed134c..c5cc0c8030 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -701,25 +701,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return NewThrowError(Runtime::kNewTypeError, message, arg, pos);
}
- // Reporting errors.
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const char* arg = nullptr) {
- pending_error_handler()->ReportMessageAt(
- source_location.beg_pos, source_location.end_pos, message, arg);
- scanner_.set_parser_error();
- }
-
// Dummy implementation. The parser should never have a unidentifiable
// error.
V8_INLINE void ReportUnidentifiableError() { UNREACHABLE(); }
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const AstRawString* arg) {
- pending_error_handler()->ReportMessageAt(
- source_location.beg_pos, source_location.end_pos, message, arg);
- scanner_.set_parser_error();
- }
-
const AstRawString* GetRawNameFromIdentifier(const AstRawString* arg) {
return arg;
}
diff --git a/deps/v8/src/parsing/pending-compilation-error-handler.cc b/deps/v8/src/parsing/pending-compilation-error-handler.cc
index 60bc8ada27..4756628ca7 100644
--- a/deps/v8/src/parsing/pending-compilation-error-handler.cc
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.cc
@@ -19,49 +19,53 @@ namespace internal {
void PendingCompilationErrorHandler::MessageDetails::SetString(
Handle<String> string, Isolate* isolate) {
- DCHECK_NE(type_, kMainThreadHandle);
- type_ = kMainThreadHandle;
- arg_handle_ = string;
+ DCHECK_NE(args_[0].type, kMainThreadHandle);
+ args_[0].type = kMainThreadHandle;
+ args_[0].js_string = string;
}
void PendingCompilationErrorHandler::MessageDetails::SetString(
Handle<String> string, LocalIsolate* isolate) {
- DCHECK_NE(type_, kMainThreadHandle);
- type_ = kMainThreadHandle;
- arg_handle_ = isolate->heap()->NewPersistentHandle(string);
+ DCHECK_NE(args_[0].type, kMainThreadHandle);
+ args_[0].type = kMainThreadHandle;
+ args_[0].js_string = isolate->heap()->NewPersistentHandle(string);
}
template <typename IsolateT>
void PendingCompilationErrorHandler::MessageDetails::Prepare(
IsolateT* isolate) {
- switch (type_) {
- case kAstRawString:
- return SetString(arg_->string(), isolate);
-
- case kNone:
- case kConstCharString:
- // We can delay allocation until ArgumentString(isolate).
- // TODO(leszeks): We don't actually have to transfer this string, since
- // it's a root.
- return;
-
- case kMainThreadHandle:
- // The message details might already be prepared, so skip them if this is
- // the case.
- return;
+ for (int i = 0; i < kMaxArgumentCount; i++) {
+ switch (args_[i].type) {
+ case kAstRawString:
+ return SetString(args_[i].ast_string->string(), isolate);
+
+ case kNone:
+ case kConstCharString:
+ // We can delay allocation until ArgString(isolate).
+ return;
+
+ case kMainThreadHandle:
+ // The message details might already be prepared, so skip them if this
+ // is the case.
+ return;
+ }
}
}
-Handle<String> PendingCompilationErrorHandler::MessageDetails::ArgumentString(
- Isolate* isolate) const {
- switch (type_) {
+Handle<String> PendingCompilationErrorHandler::MessageDetails::ArgString(
+ Isolate* isolate, int index) const {
+ // `index` may be >= argc; in that case we return a default value to pass on
+ // elsewhere.
+ DCHECK_LT(index, kMaxArgumentCount);
+ switch (args_[index].type) {
case kMainThreadHandle:
- return arg_handle_;
+ return args_[index].js_string;
case kNone:
- return isolate->factory()->undefined_string();
+ return Handle<String>::null();
case kConstCharString:
return isolate->factory()
- ->NewStringFromUtf8(base::CStrVector(char_arg_), AllocationType::kOld)
+ ->NewStringFromUtf8(base::CStrVector(args_[index].c_string),
+ AllocationType::kOld)
.ToHandleChecked();
case kAstRawString:
UNREACHABLE();
@@ -93,6 +97,17 @@ void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
error_details_ = MessageDetails(start_position, end_position, message, arg);
}
+void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
+ int end_position,
+ MessageTemplate message,
+ const AstRawString* arg0,
+ const char* arg1) {
+ if (has_pending_error_) return;
+ has_pending_error_ = true;
+ error_details_ =
+ MessageDetails(start_position, end_position, message, arg0, arg1);
+}
+
void PendingCompilationErrorHandler::ReportWarningAt(int start_position,
int end_position,
MessageTemplate message,
@@ -119,7 +134,8 @@ void PendingCompilationErrorHandler::ReportWarnings(
for (const MessageDetails& warning : warning_messages_) {
MessageLocation location = warning.GetLocation(script);
- Handle<String> argument = warning.ArgumentString(isolate);
+ Handle<String> argument = warning.ArgString(isolate, 0);
+ DCHECK_LT(warning.ArgCount(), 2); // Arg1 is only used for errors.
Handle<JSMessageObject> message =
MessageHandler::MakeMessageObject(isolate, warning.message(), &location,
argument, Handle<FixedArray>::null());
@@ -160,12 +176,13 @@ void PendingCompilationErrorHandler::ThrowPendingError(
if (!has_pending_error_) return;
MessageLocation location = error_details_.GetLocation(script);
- Handle<String> argument = error_details_.ArgumentString(isolate);
+ Handle<String> arg0 = error_details_.ArgString(isolate, 0);
+ Handle<String> arg1 = error_details_.ArgString(isolate, 1);
isolate->debug()->OnCompileError(script);
Factory* factory = isolate->factory();
Handle<JSObject> error =
- factory->NewSyntaxError(error_details_.message(), argument);
+ factory->NewSyntaxError(error_details_.message(), arg0, arg1);
isolate->ThrowAt(error, &location);
}
@@ -173,7 +190,8 @@ Handle<String> PendingCompilationErrorHandler::FormatErrorMessageForTest(
Isolate* isolate) {
error_details_.Prepare(isolate);
return MessageFormatter::Format(isolate, error_details_.message(),
- error_details_.ArgumentString(isolate));
+ error_details_.ArgString(isolate, 0),
+ error_details_.ArgString(isolate, 1));
}
} // namespace internal
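
MessageDetails now stores up to two tagged arguments instead of a single union, and ArgCount() counts the leading non-empty slots. Here is a simplified stand-in for that storage; std::string replaces the AstRawString / const char* / Handle<String> payloads, and only the counting logic is modeled.

#include <cassert>
#include <string>

struct MessageArgument {
  enum Type { kNone, kConstCharString, kAstRawString } type = kNone;
  std::string text;  // stand-in for the real payload union
};

struct MessageDetails {
  static constexpr int kMaxArgumentCount = 2;
  MessageArgument args[kMaxArgumentCount];

  int ArgCount() const {
    int argc = 0;
    for (int i = 0; i < kMaxArgumentCount; i++) {
      if (args[i].type == MessageArgument::kNone) break;
      argc++;
    }
    return argc;
  }
};

int main() {
  // e.g. kMalformedRegExp carries the pattern plus the RegExpError string.
  MessageDetails details;
  details.args[0] = {MessageArgument::kAstRawString, "(?<x>"};
  details.args[1] = {MessageArgument::kConstCharString, "regexp error text"};
  assert(details.ArgCount() == 2);
  return 0;
}
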
diff --git a/deps/v8/src/parsing/pending-compilation-error-handler.h b/deps/v8/src/parsing/pending-compilation-error-handler.h
index 31e765d514..9384e94df7 100644
--- a/deps/v8/src/parsing/pending-compilation-error-handler.h
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.h
@@ -25,9 +25,7 @@ class Script;
// compilation phases.
class PendingCompilationErrorHandler {
public:
- PendingCompilationErrorHandler()
- : has_pending_error_(false), stack_overflow_(false) {}
-
+ PendingCompilationErrorHandler() = default;
PendingCompilationErrorHandler(const PendingCompilationErrorHandler&) =
delete;
PendingCompilationErrorHandler& operator=(
@@ -39,6 +37,10 @@ class PendingCompilationErrorHandler {
void ReportMessageAt(int start_position, int end_position,
MessageTemplate message, const AstRawString* arg);
+ void ReportMessageAt(int start_position, int end_position,
+ MessageTemplate message, const AstRawString* arg0,
+ const char* arg1);
+
void ReportWarningAt(int start_position, int end_position,
MessageTemplate message, const char* arg = nullptr);
@@ -85,24 +87,45 @@ class PendingCompilationErrorHandler {
MessageDetails()
: start_position_(-1),
end_position_(-1),
- message_(MessageTemplate::kNone),
- type_(kNone) {}
+ message_(MessageTemplate::kNone) {}
+ MessageDetails(int start_position, int end_position,
+ MessageTemplate message, const AstRawString* arg0)
+ : start_position_(start_position),
+ end_position_(end_position),
+ message_(message),
+ args_{MessageArgument{arg0}, MessageArgument{}} {}
MessageDetails(int start_position, int end_position,
- MessageTemplate message, const AstRawString* arg)
+ MessageTemplate message, const AstRawString* arg0,
+ const char* arg1)
: start_position_(start_position),
end_position_(end_position),
message_(message),
- arg_(arg),
- type_(arg ? kAstRawString : kNone) {}
+ args_{MessageArgument{arg0}, MessageArgument{arg1}} {
+ DCHECK_NOT_NULL(arg0);
+ DCHECK_NOT_NULL(arg1);
+ }
MessageDetails(int start_position, int end_position,
- MessageTemplate message, const char* char_arg)
+ MessageTemplate message, const char* arg0)
: start_position_(start_position),
end_position_(end_position),
message_(message),
- char_arg_(char_arg),
- type_(char_arg_ ? kConstCharString : kNone) {}
+ args_{MessageArgument{arg0}, MessageArgument{}} {}
+
+ Handle<String> ArgString(Isolate* isolate, int index) const;
+ int ArgCount() const {
+ int argc = 0;
+ for (int i = 0; i < kMaxArgumentCount; i++) {
+ if (args_[i].type == kNone) break;
+ argc++;
+ }
+#ifdef DEBUG
+ for (int i = argc; i < kMaxArgumentCount; i++) {
+ DCHECK_EQ(args_[i].type, kNone);
+ }
+#endif // DEBUG
+ return argc;
+ }
- Handle<String> ArgumentString(Isolate* isolate) const;
MessageLocation GetLocation(Handle<Script> script) const;
MessageTemplate message() const { return message_; }
@@ -117,19 +140,32 @@ class PendingCompilationErrorHandler {
int start_position_;
int end_position_;
+
MessageTemplate message_;
- union {
- const AstRawString* arg_;
- const char* char_arg_;
- Handle<String> arg_handle_;
+
+ struct MessageArgument final {
+ constexpr MessageArgument() : ast_string(nullptr), type(kNone) {}
+ explicit constexpr MessageArgument(const AstRawString* s)
+ : ast_string(s), type(s == nullptr ? kNone : kAstRawString) {}
+ explicit constexpr MessageArgument(const char* s)
+ : c_string(s), type(s == nullptr ? kNone : kConstCharString) {}
+
+ union {
+ const AstRawString* ast_string;
+ const char* c_string;
+ Handle<String> js_string;
+ };
+ Type type;
};
- Type type_;
+
+ static constexpr int kMaxArgumentCount = 2;
+ MessageArgument args_[kMaxArgumentCount];
};
void ThrowPendingError(Isolate* isolate, Handle<Script> script) const;
- bool has_pending_error_;
- bool stack_overflow_;
+ bool has_pending_error_ = false;
+ bool stack_overflow_ = false;
bool unidentifiable_error_ = false;
MessageDetails error_details_;
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index 1643c6ba1a..f368a11f9a 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -666,12 +666,13 @@ void BaseConsumedPreparseData<Data>::RestoreDataForScope(
scope->AsDeclarationScope()->RecordNeedsPrivateNameContextChainRecalc();
}
if (ShouldSaveClassVariableIndexField::decode(scope_data_flags)) {
- Variable* var;
- // An anonymous class whose class variable needs to be saved do not
+ Variable* var = scope->AsClassScope()->class_variable();
+ // An anonymous class whose class variable needs to be saved might not
// have the class variable created during reparse since we skip parsing
// the inner scopes that contain potential access to static private
// methods. So create it now.
- if (scope->AsClassScope()->is_anonymous_class()) {
+ if (var == nullptr) {
+ DCHECK(scope->AsClassScope()->is_anonymous_class());
var = scope->AsClassScope()->DeclareClassVariable(
ast_value_factory, nullptr, kNoSourcePosition);
AstNodeFactory factory(ast_value_factory, zone);
@@ -679,9 +680,6 @@ void BaseConsumedPreparseData<Data>::RestoreDataForScope(
factory.NewVariableDeclaration(kNoSourcePosition);
scope->declarations()->Add(declaration);
declaration->set_var(var);
- } else {
- var = scope->AsClassScope()->class_variable();
- DCHECK_NOT_NULL(var);
}
var->set_is_used();
var->ForceContextAllocation();
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 1949e7f8a7..746802a9aa 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -537,7 +537,7 @@ class PreParserFactory {
PreParserExpression NewTheHoleLiteral() {
return PreParserExpression::Default();
}
- PreParserExpression NewRegExpLiteral(const PreParserIdentifier& js_pattern,
+ PreParserExpression NewRegExpLiteral(const AstRawString* js_pattern,
int js_flags, int pos) {
return PreParserExpression::Default();
}
@@ -1455,12 +1455,9 @@ class PreParser : public ParserBase<PreParser> {
return PreParserExpression::Default();
}
- // Reporting errors.
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const char* arg = nullptr) {
- pending_error_handler()->ReportMessageAt(
- source_location.beg_pos, source_location.end_pos, message, arg);
- scanner()->set_parser_error();
+ V8_INLINE const AstRawString* PreParserIdentifierToAstRawString(
+ const PreParserIdentifier& x) {
+ return x.string_;
}
V8_INLINE void ReportUnidentifiableError() {
@@ -1468,19 +1465,6 @@ class PreParser : public ParserBase<PreParser> {
scanner()->set_parser_error();
}
- V8_INLINE void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message,
- const PreParserIdentifier& arg) {
- ReportMessageAt(source_location, message, arg.string_);
- }
-
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const AstRawString* arg) {
- pending_error_handler()->ReportMessageAt(
- source_location.beg_pos, source_location.end_pos, message, arg);
- scanner()->set_parser_error();
- }
-
const AstRawString* GetRawNameFromIdentifier(const PreParserIdentifier& arg) {
return arg.string_;
}
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index becc72c12d..a4748f0c33 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -7,7 +7,8 @@
#include <memory>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-callbacks.h"
+#include "include/v8-primitive.h"
#include "src/base/strings.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
diff --git a/deps/v8/src/parsing/scanner-character-streams.h b/deps/v8/src/parsing/scanner-character-streams.h
index 09181356f0..8665ea0b4b 100644
--- a/deps/v8/src/parsing/scanner-character-streams.h
+++ b/deps/v8/src/parsing/scanner-character-streams.h
@@ -7,7 +7,7 @@
#include <memory>
-#include "include/v8.h" // for v8::ScriptCompiler
+#include "include/v8-script.h" // for v8::ScriptCompiler
#include "src/common/globals.h"
namespace v8 {
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index b624694295..cbfd399020 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -978,9 +978,6 @@ bool Scanner::ScanRegExpPattern() {
// worrying whether the following characters are part of the escape
// or not, since any '/', '\\' or '[' is guaranteed to not be part
// of the escape sequence.
-
- // TODO(896): At some point, parse RegExps more thoroughly to capture
- // octal esacpes in strict mode.
} else { // Unescaped character.
if (c0_ == '[') in_character_class = true;
if (c0_ == ']') in_character_class = false;
@@ -993,22 +990,21 @@ bool Scanner::ScanRegExpPattern() {
return true;
}
-Maybe<int> Scanner::ScanRegExpFlags() {
+base::Optional<RegExpFlags> Scanner::ScanRegExpFlags() {
DCHECK_EQ(Token::REGEXP_LITERAL, next().token);
- // Scan regular expression flags.
- JSRegExp::Flags flags;
+ RegExpFlags flags;
while (IsIdentifierPart(c0_)) {
- base::Optional<JSRegExp::Flags> maybe_flag = JSRegExp::FlagFromChar(c0_);
- if (!maybe_flag.has_value()) return Nothing<int>();
- JSRegExp::Flags flag = *maybe_flag;
- if (flags & flag) return Nothing<int>();
+ base::Optional<RegExpFlag> maybe_flag = JSRegExp::FlagFromChar(c0_);
+ if (!maybe_flag.has_value()) return {};
+ RegExpFlag flag = maybe_flag.value();
+ if (flags & flag) return {};
Advance();
flags |= flag;
}
next().location.end_pos = source_pos();
- return Just<int>(flags);
+ return flags;
}
const AstRawString* Scanner::CurrentSymbol(
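
The ScanRegExpFlags change above swaps Maybe<int> for base::Optional<RegExpFlags>, returning an empty optional on an unknown or repeated flag. A minimal standalone sketch of the same scanning contract, using std::optional and made-up flag names rather than V8's RegExpFlags:

    #include <optional>
    #include <string_view>

    enum Flag : int { kGlobal = 1 << 0, kIgnoreCase = 1 << 1, kMultiline = 1 << 2 };

    std::optional<int> ScanFlags(std::string_view flags) {
      int result = 0;
      for (char c : flags) {
        int flag;
        switch (c) {
          case 'g': flag = kGlobal; break;
          case 'i': flag = kIgnoreCase; break;
          case 'm': flag = kMultiline; break;
          default: return std::nullopt;  // unknown flag character
        }
        if (result & flag) return std::nullopt;  // duplicate flag
        result |= flag;
      }
      return result;  // accumulated flag bits
    }

Calling ScanFlags("gi") yields kGlobal | kIgnoreCase, while ScanFlags("gg") and ScanFlags("x") both yield an empty optional, matching the duplicate and unknown-flag paths in the hunk.
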
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 3474f7270d..7ab44d5b20 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -10,7 +10,6 @@
#include <algorithm>
#include <memory>
-#include "include/v8.h"
#include "src/base/logging.h"
#include "src/base/strings.h"
#include "src/common/globals.h"
@@ -18,6 +17,7 @@
#include "src/parsing/literal-buffer.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
+#include "src/regexp/regexp-flags.h"
#include "src/strings/char-predicates.h"
#include "src/strings/unicode.h"
#include "src/utils/allocation.h"
@@ -399,7 +399,7 @@ class V8_EXPORT_PRIVATE Scanner {
// Returns true if a pattern is scanned.
bool ScanRegExpPattern();
// Scans the input as regular expression flags. Returns the flags on success.
- Maybe<int> ScanRegExpFlags();
+ base::Optional<RegExpFlags> ScanRegExpFlags();
// Scans the input as a template literal
Token::Value ScanTemplateContinuation() {
diff --git a/deps/v8/src/profiler/allocation-tracker.h b/deps/v8/src/profiler/allocation-tracker.h
index 36b9e91883..a33f08c0d0 100644
--- a/deps/v8/src/profiler/allocation-tracker.h
+++ b/deps/v8/src/profiler/allocation-tracker.h
@@ -8,7 +8,9 @@
#include <map>
#include <vector>
+#include "include/v8-persistent-handle.h"
#include "include/v8-profiler.h"
+#include "include/v8-unwinder.h"
#include "src/base/hashmap.h"
#include "src/base/vector.h"
#include "src/handles/handles.h"
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index a59c9359eb..cf4f549a39 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -7,6 +7,7 @@
#include <unordered_map>
#include <utility>
+#include "include/v8-locker.h"
#include "src/base/lazy-instance.h"
#include "src/base/template-utils.h"
#include "src/debug/debug.h"
@@ -361,6 +362,16 @@ void ProfilerCodeObserver::CodeEventHandler(
CodeEventHandlerInternal(evt_rec);
}
+size_t ProfilerCodeObserver::GetEstimatedMemoryUsage() const {
+  // To avoid a race condition on the code map, limit this computation to
+  // kEagerLogging mode (i.e. when no processor thread is running) for now.
+ if (!processor_) {
+ return sizeof(*this) + code_map_.GetEstimatedMemoryUsage() +
+ code_entries_.strings().GetStringSize();
+ }
+ return 0;
+}
+
void ProfilerCodeObserver::CodeEventHandlerInternal(
const CodeEventsContainer& evt_rec) {
CodeEventsContainer record = evt_rec;
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index b465f827c9..ea14d6c618 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -268,6 +268,7 @@ class V8_EXPORT_PRIVATE ProfilerCodeObserver : public CodeEventObserver {
CodeEntryStorage* code_entries() { return &code_entries_; }
CodeMap* code_map() { return &code_map_; }
WeakCodeRegistry* weak_code_registry() { return &weak_code_registry_; }
+ size_t GetEstimatedMemoryUsage() const;
void ClearCodeMap();
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 231595dae7..1144fdd15e 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -763,7 +763,12 @@ class IndexedReferencesExtractor : public ObjectVisitor {
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- VisitHeapObjectImpl(rinfo->target_object(), -1);
+ HeapObject object = rinfo->target_object();
+ if (host.IsWeakObject(object)) {
+ generator_->SetWeakReference(parent_, next_index_++, object, {});
+ } else {
+ VisitHeapObjectImpl(rinfo->target_object(), -1);
+ }
}
private:
@@ -774,8 +779,11 @@ class IndexedReferencesExtractor : public ObjectVisitor {
generator_->visited_fields_[field_index] = false;
} else {
HeapObject heap_object;
- if (slot.load(cage_base).GetHeapObject(&heap_object)) {
+ auto loaded_value = slot.load(cage_base);
+ if (loaded_value.GetHeapObjectIfStrong(&heap_object)) {
VisitHeapObjectImpl(heap_object, field_index);
+ } else if (loaded_value.GetHeapObjectIfWeak(&heap_object)) {
+ generator_->SetWeakReference(parent_, next_index_++, heap_object, {});
}
}
}
@@ -1223,15 +1231,20 @@ void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code code) {
return;
}
- TagObject(code.deoptimization_data(), "(code deopt data)");
- SetInternalReference(entry, "deoptimization_data", code.deoptimization_data(),
- Code::kDeoptimizationDataOffset);
if (code.kind() == CodeKind::BASELINE) {
+ TagObject(code.bytecode_or_interpreter_data(), "(interpreter data)");
+ SetInternalReference(entry, "interpreter_data",
+ code.bytecode_or_interpreter_data(),
+ Code::kDeoptimizationDataOrInterpreterDataOffset);
TagObject(code.bytecode_offset_table(), "(bytecode offset table)");
SetInternalReference(entry, "bytecode_offset_table",
code.bytecode_offset_table(),
Code::kPositionTableOffset);
} else {
+ TagObject(code.deoptimization_data(), "(code deopt data)");
+ SetInternalReference(entry, "deoptimization_data",
+ code.deoptimization_data(),
+ Code::kDeoptimizationDataOrInterpreterDataOffset);
TagObject(code.source_position_table(), "(source position table)");
SetInternalReference(entry, "source_position_table",
code.source_position_table(),
@@ -1781,7 +1794,8 @@ void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry,
}
void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry, int index,
- Object child_obj, int field_offset) {
+ Object child_obj,
+ base::Optional<int> field_offset) {
if (!IsEssentialObject(child_obj)) {
return;
}
@@ -1789,7 +1803,9 @@ void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry, int index,
DCHECK_NOT_NULL(child_entry);
parent_entry->SetNamedReference(
HeapGraphEdge::kWeak, names_->GetFormatted("%d", index), child_entry);
- MarkVisitedField(field_offset);
+ if (field_offset.has_value()) {
+ MarkVisitedField(*field_offset);
+ }
}
void V8HeapExplorer::SetDataOrAccessorPropertyReference(
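
SetWeakReference now takes the field offset as base::Optional<int>, so call sites that have no offset (such as the embedded-pointer visitor above) can pass {} and skip the visited-field bookkeeping. A tiny standalone sketch of that calling convention, with std::optional standing in for base::Optional and an illustrative MarkVisitedField stub:

    #include <cstdio>
    #include <optional>

    void MarkVisitedField(int offset) { std::printf("visited offset %d\n", offset); }

    void SetWeakReference(std::optional<int> field_offset) {
      // ... the weak edge itself would be recorded here ...
      if (field_offset.has_value()) MarkVisitedField(*field_offset);
    }

    int main() {
      SetWeakReference(120);  // caller knows the field offset
      SetWeakReference({});   // e.g. an embedded pointer: nothing to mark
      return 0;
    }
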
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 2ab13a99bf..1855aee53c 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -436,7 +436,7 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
void SetWeakReference(HeapEntry* parent_entry, const char* reference_name,
Object child_obj, int field_offset);
void SetWeakReference(HeapEntry* parent_entry, int index, Object child_obj,
- int field_offset);
+ base::Optional<int> field_offset);
void SetPropertyReference(HeapEntry* parent_entry, Name reference_name,
Object child,
const char* name_format_string = nullptr,
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 06aefe9505..34a15159a3 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -64,6 +64,11 @@ int SourcePositionTable::GetInliningId(int pc_offset) const {
return it->inlining_id;
}
+size_t SourcePositionTable::Size() const {
+ return sizeof(*this) + pc_offsets_to_lines_.capacity() *
+ sizeof(decltype(pc_offsets_to_lines_)::value_type);
+}
+
void SourcePositionTable::print() const {
base::OS::Print(" - source position table at %p\n", this);
for (const SourcePositionTuple& pos_info : pc_offsets_to_lines_) {
@@ -207,6 +212,37 @@ void CodeEntry::FillFunctionInfo(SharedFunctionInfo shared) {
}
}
+size_t CodeEntry::EstimatedSize() const {
+ size_t estimated_size = 0;
+ if (rare_data_) {
+ estimated_size += sizeof(rare_data_.get());
+
+ for (const auto& inline_entry : rare_data_->inline_entries_) {
+ estimated_size += inline_entry->EstimatedSize();
+ }
+ estimated_size += rare_data_->inline_entries_.size() *
+ sizeof(decltype(rare_data_->inline_entries_)::value_type);
+
+ for (const auto& inline_stack_pair : rare_data_->inline_stacks_) {
+ estimated_size += inline_stack_pair.second.size() *
+ sizeof(decltype(inline_stack_pair.second)::value_type);
+ }
+ estimated_size +=
+ rare_data_->inline_stacks_.size() *
+ (sizeof(decltype(rare_data_->inline_stacks_)::key_type) +
+ sizeof(decltype(rare_data_->inline_stacks_)::value_type));
+
+ estimated_size +=
+ rare_data_->deopt_inlined_frames_.capacity() *
+ sizeof(decltype(rare_data_->deopt_inlined_frames_)::value_type);
+ }
+
+ if (line_info_) {
+ estimated_size += line_info_.get()->Size();
+ }
+ return sizeof(*this) + estimated_size;
+}
+
CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
DCHECK(has_deopt_info());
@@ -423,9 +459,7 @@ class DeleteNodesCallback {
public:
void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
- void AfterAllChildrenTraversed(ProfileNode* node) {
- delete node;
- }
+ void AfterAllChildrenTraversed(ProfileNode* node) { delete node; }
void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};
@@ -845,6 +879,15 @@ void CodeMap::Print() {
}
}
+size_t CodeMap::GetEstimatedMemoryUsage() const {
+ size_t map_size = 0;
+ for (const auto& pair : code_map_) {
+ map_size += sizeof(pair.first) + sizeof(pair.second) +
+ pair.second.entry->EstimatedSize();
+ }
+ return sizeof(*this) + map_size;
+}
+
CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
: profiler_(nullptr), current_profiles_semaphore_(1) {}
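
The new Size(), EstimatedSize(), and GetEstimatedMemoryUsage() methods above all estimate container footprints as element count (or capacity) times the element size, plus the owning object's own sizeof. A standalone sketch of that idiom with illustrative types, not V8's:

    #include <cstddef>
    #include <map>
    #include <vector>

    struct Entry {
      int pc_offset;
      int line;
    };

    struct Table {
      std::vector<Entry> rows;
      std::map<int, int> index;

      size_t EstimatedSize() const {
        size_t bytes = sizeof(*this);
        bytes += rows.capacity() * sizeof(decltype(rows)::value_type);
        bytes += index.size() * (sizeof(decltype(index)::key_type) +
                                 sizeof(decltype(index)::mapped_type));
        return bytes;  // heap payload only; node and allocator overhead ignored
      }
    };

As in the patch, this deliberately ignores per-node and allocator overhead, so it is a cheap lower-bound estimate rather than an exact accounting.
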
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 3e8d073f63..bb0adbfe3b 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -38,6 +38,7 @@ class V8_EXPORT_PRIVATE SourcePositionTable : public Malloced {
int GetSourceLineNumber(int pc_offset) const;
int GetInliningId(int pc_offset) const;
+ size_t Size() const;
void print() const;
private:
@@ -98,6 +99,7 @@ class CodeEntry {
void set_deopt_info(const char* deopt_reason, int deopt_id,
std::vector<CpuProfileDeoptFrame> inlined_frames);
+ size_t EstimatedSize() const;
CpuProfileDeoptInfo GetDeoptInfo();
bool has_deopt_info() const {
return rare_data_ && rare_data_->deopt_id_ != kNoDeoptimizationId;
@@ -491,6 +493,8 @@ class V8_EXPORT_PRIVATE CodeMap {
void Print();
size_t size() const { return code_map_.size(); }
+ size_t GetEstimatedMemoryUsage() const;
+
CodeEntryStorage& code_entries() { return code_entries_; }
void Clear();
diff --git a/deps/v8/src/profiler/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index 054aa3f80e..37197a5918 100644
--- a/deps/v8/src/profiler/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -36,6 +36,7 @@ const char* StringsStorage::GetCopy(const char* src) {
base::StrNCpy(dst, src, len);
dst[len] = '\0';
entry->key = dst.begin();
+ string_size_ += len;
}
entry->value =
reinterpret_cast<void*>(reinterpret_cast<size_t>(entry->value) + 1);
@@ -56,6 +57,7 @@ const char* StringsStorage::AddOrDisposeString(char* str, int len) {
if (entry->value == nullptr) {
// New entry added.
entry->key = str;
+ string_size_ += len;
} else {
DeleteArray(str);
}
@@ -156,6 +158,7 @@ bool StringsStorage::Release(const char* str) {
reinterpret_cast<void*>(reinterpret_cast<size_t>(entry->value) - 1);
if (entry->value == 0) {
+ string_size_ -= len;
names_.Remove(const_cast<char*>(str), hash);
DeleteArray(str);
}
@@ -166,6 +169,11 @@ size_t StringsStorage::GetStringCountForTesting() const {
return names_.occupancy();
}
+size_t StringsStorage::GetStringSize() {
+ base::MutexGuard guard(&mutex_);
+ return string_size_;
+}
+
base::HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
uint32_t hash = ComputeStringHash(str, len);
return names_.LookupOrInsert(const_cast<char*>(str), hash);
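
The strings-storage change keeps a running byte total (string_size_) that is incremented when an owned copy is added and decremented when the last reference is released, with GetStringSize() reading it under a lock. A compact standalone sketch of the same bookkeeping, using std::string and a per-entry reference count instead of V8's char*-based hashmap (illustrative only):

    #include <cstddef>
    #include <mutex>
    #include <string>
    #include <unordered_map>

    class StringStore {
     public:
      const std::string& Intern(std::string s) {
        std::lock_guard<std::mutex> guard(mutex_);
        auto it = strings_.find(s);
        if (it == strings_.end()) {
          string_size_ += s.size();  // count bytes only for newly owned strings
          it = strings_.emplace(std::move(s), 0).first;
        }
        ++it->second;  // per-entry reference count
        return it->first;
      }

      void Release(const std::string& s) {
        std::lock_guard<std::mutex> guard(mutex_);
        auto it = strings_.find(s);
        if (it != strings_.end() && --it->second == 0) {
          string_size_ -= it->first.size();
          strings_.erase(it);
        }
      }

      size_t GetStringSize() {
        std::lock_guard<std::mutex> guard(mutex_);
        return string_size_;
      }

     private:
      std::mutex mutex_;
      std::unordered_map<std::string, int> strings_;
      size_t string_size_ = 0;
    };

In this sketch one lock guards both the map and the counter; the patch reuses the class's existing mutex_ only for the GetStringSize() read.
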
diff --git a/deps/v8/src/profiler/strings-storage.h b/deps/v8/src/profiler/strings-storage.h
index 7e39c0ee33..1d4c2e44d2 100644
--- a/deps/v8/src/profiler/strings-storage.h
+++ b/deps/v8/src/profiler/strings-storage.h
@@ -47,6 +47,9 @@ class V8_EXPORT_PRIVATE StringsStorage {
// Returns the number of strings in the store.
size_t GetStringCountForTesting() const;
+  // Returns the total byte size of the strings in the store.
+ size_t GetStringSize();
+
// Returns true if the strings table is empty.
bool empty() const { return names_.occupancy() == 0; }
@@ -62,6 +65,7 @@ class V8_EXPORT_PRIVATE StringsStorage {
base::CustomMatcherHashMap names_;
base::Mutex mutex_;
+ size_t string_size_ = 0;
};
} // namespace internal
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index 253b80d19e..daef48eb26 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -105,7 +105,7 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate,
state->sp = reinterpret_cast<void*>(simulator->sp());
state->fp = reinterpret_cast<void*>(simulator->fp());
state->lr = reinterpret_cast<void*>(simulator->lr());
-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64
if (!simulator->has_bad_pc()) {
state->pc = reinterpret_cast<void*>(simulator->get_pc());
}
diff --git a/deps/v8/src/profiler/tick-sample.h b/deps/v8/src/profiler/tick-sample.h
index 1bfcb7d097..4402bdc272 100644
--- a/deps/v8/src/profiler/tick-sample.h
+++ b/deps/v8/src/profiler/tick-sample.h
@@ -5,7 +5,7 @@
#ifndef V8_PROFILER_TICK_SAMPLE_H_
#define V8_PROFILER_TICK_SAMPLE_H_
-#include "include/v8.h"
+#include "include/v8-unwinder.h"
#include "src/base/platform/time.h"
#include "src/common/globals.h"
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 6edb133576..67793ffc41 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -185,7 +185,6 @@ void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
}
default:
UNREACHABLE();
- break;
}
}
}
diff --git a/deps/v8/src/regexp/experimental/experimental-compiler.cc b/deps/v8/src/regexp/experimental/experimental-compiler.cc
index 8b1d841536..ae4abce7b5 100644
--- a/deps/v8/src/regexp/experimental/experimental-compiler.cc
+++ b/deps/v8/src/regexp/experimental/experimental-compiler.cc
@@ -16,12 +16,14 @@ namespace {
// TODO(mbid, v8:10765): Currently the experimental engine doesn't support
// UTF-16, but this shouldn't be too hard to implement.
constexpr base::uc32 kMaxSupportedCodepoint = 0xFFFFu;
+#ifdef DEBUG
+constexpr base::uc32 kMaxCodePoint = 0x10ffff;
+#endif // DEBUG
class CanBeHandledVisitor final : private RegExpVisitor {
// Visitor to implement `ExperimentalRegExp::CanBeHandled`.
public:
- static bool Check(RegExpTree* tree, JSRegExp::Flags flags,
- int capture_count) {
+ static bool Check(RegExpTree* tree, RegExpFlags flags, int capture_count) {
if (!AreSuitableFlags(flags)) return false;
CanBeHandledVisitor visitor;
tree->Accept(&visitor, nullptr);
@@ -31,15 +33,15 @@ class CanBeHandledVisitor final : private RegExpVisitor {
private:
CanBeHandledVisitor() = default;
- static bool AreSuitableFlags(JSRegExp::Flags flags) {
+ static bool AreSuitableFlags(RegExpFlags flags) {
// TODO(mbid, v8:10765): We should be able to support all flags in the
// future.
- static constexpr JSRegExp::Flags kAllowedFlags =
- JSRegExp::kGlobal | JSRegExp::kSticky | JSRegExp::kMultiline |
- JSRegExp::kDotAll | JSRegExp::kLinear;
+ static constexpr RegExpFlags kAllowedFlags =
+ RegExpFlag::kGlobal | RegExpFlag::kSticky | RegExpFlag::kMultiline |
+ RegExpFlag::kDotAll | RegExpFlag::kLinear;
// We support Unicode iff kUnicode is among the supported flags.
STATIC_ASSERT(ExperimentalRegExp::kSupportsUnicode ==
- ((kAllowedFlags & JSRegExp::kUnicode) != 0));
+ IsUnicode(kAllowedFlags));
return (flags & ~kAllowedFlags) == 0;
}
@@ -173,7 +175,7 @@ class CanBeHandledVisitor final : private RegExpVisitor {
} // namespace
bool ExperimentalRegExpCompiler::CanBeHandled(RegExpTree* tree,
- JSRegExp::Flags flags,
+ RegExpFlags flags,
int capture_count) {
return CanBeHandledVisitor::Check(tree, flags, capture_count);
}
@@ -294,11 +296,10 @@ class BytecodeAssembler {
class CompileVisitor : private RegExpVisitor {
public:
static ZoneList<RegExpInstruction> Compile(RegExpTree* tree,
- JSRegExp::Flags flags,
- Zone* zone) {
+ RegExpFlags flags, Zone* zone) {
CompileVisitor compiler(zone);
- if ((flags & JSRegExp::kSticky) == 0 && !tree->IsAnchoredAtStart()) {
+ if (!IsSticky(flags) && !tree->IsAnchoredAtStart()) {
// The match is not anchored, i.e. may start at any input position, so we
// emit a preamble corresponding to /.*?/. This skips an arbitrary
// prefix in the input non-greedily.
@@ -409,7 +410,7 @@ class CompileVisitor : private RegExpVisitor {
base::uc16 from_uc16 = static_cast<base::uc16>(from);
base::uc32 to = (*ranges)[i].to();
- DCHECK_IMPLIES(to > kMaxSupportedCodepoint, to == String::kMaxCodePoint);
+ DCHECK_IMPLIES(to > kMaxSupportedCodepoint, to == kMaxCodePoint);
base::uc16 to_uc16 =
static_cast<base::uc16>(std::min(to, kMaxSupportedCodepoint));
@@ -627,7 +628,7 @@ class CompileVisitor : private RegExpVisitor {
} // namespace
ZoneList<RegExpInstruction> ExperimentalRegExpCompiler::Compile(
- RegExpTree* tree, JSRegExp::Flags flags, Zone* zone) {
+ RegExpTree* tree, RegExpFlags flags, Zone* zone) {
return CompileVisitor::Compile(tree, flags, zone);
}
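
Throughout these regexp files, JSRegExp::Flags gives way to the engine-internal RegExpFlags type, whose individual RegExpFlag bits are combined with | and queried through small named predicates such as IsSticky and IsUnicode. A standalone sketch of that flag-set style, with illustrative names rather than V8's:

    enum class Flag : int { kGlobal = 1 << 0, kSticky = 1 << 1, kUnicode = 1 << 2 };
    using Flags = int;

    constexpr Flags operator|(Flag a, Flag b) {
      return static_cast<int>(a) | static_cast<int>(b);
    }
    constexpr Flags operator|(Flags a, Flag b) { return a | static_cast<int>(b); }

    constexpr bool IsSticky(Flags f) {
      return (f & static_cast<int>(Flag::kSticky)) != 0;
    }
    constexpr bool IsUnicode(Flags f) {
      return (f & static_cast<int>(Flag::kUnicode)) != 0;
    }

    // The allowed set becomes a readable constant expression:
    constexpr Flags kAllowedFlags = Flag::kGlobal | Flag::kSticky;
    static_assert(!IsUnicode(kAllowedFlags), "unicode is not in the allowed set");

The named predicates let call sites such as the sticky-preamble check above read as intent rather than as raw mask arithmetic.
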
diff --git a/deps/v8/src/regexp/experimental/experimental-compiler.h b/deps/v8/src/regexp/experimental/experimental-compiler.h
index 87abcd3917..e6abf0557f 100644
--- a/deps/v8/src/regexp/experimental/experimental-compiler.h
+++ b/deps/v8/src/regexp/experimental/experimental-compiler.h
@@ -7,6 +7,7 @@
#include "src/regexp/experimental/experimental-bytecode.h"
#include "src/regexp/regexp-ast.h"
+#include "src/regexp/regexp-flags.h"
#include "src/zone/zone-list.h"
namespace v8 {
@@ -19,13 +20,13 @@ class ExperimentalRegExpCompiler final : public AllStatic {
// but see the definition.
// TODO(mbid,v8:10765): Currently more things are not handled, e.g. some
// quantifiers and unicode.
- static bool CanBeHandled(RegExpTree* tree, JSRegExp::Flags flags,
+ static bool CanBeHandled(RegExpTree* tree, RegExpFlags flags,
int capture_count);
// Compile regexp into a bytecode program. The regexp must be handlable by
// the experimental engine; see`CanBeHandled`. The program is returned as a
// ZoneList backed by the same Zone that is used in the RegExpTree argument.
static ZoneList<RegExpInstruction> Compile(RegExpTree* tree,
- JSRegExp::Flags flags, Zone* zone);
+ RegExpFlags flags, Zone* zone);
};
} // namespace internal
diff --git a/deps/v8/src/regexp/experimental/experimental-interpreter.h b/deps/v8/src/regexp/experimental/experimental-interpreter.h
index d65299499b..a21b01639a 100644
--- a/deps/v8/src/regexp/experimental/experimental-interpreter.h
+++ b/deps/v8/src/regexp/experimental/experimental-interpreter.h
@@ -5,15 +5,14 @@
#ifndef V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_INTERPRETER_H_
#define V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_INTERPRETER_H_
-#include "src/base/vector.h"
-#include "src/objects/fixed-array.h"
-#include "src/objects/string.h"
#include "src/regexp/experimental/experimental-bytecode.h"
#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
+class ByteArray;
+class String;
class Zone;
class ExperimentalRegExpInterpreter final : public AllStatic {
diff --git a/deps/v8/src/regexp/experimental/experimental.cc b/deps/v8/src/regexp/experimental/experimental.cc
index bff2d7da66..c05a010d06 100644
--- a/deps/v8/src/regexp/experimental/experimental.cc
+++ b/deps/v8/src/regexp/experimental/experimental.cc
@@ -14,7 +14,7 @@
namespace v8 {
namespace internal {
-bool ExperimentalRegExp::CanBeHandled(RegExpTree* tree, JSRegExp::Flags flags,
+bool ExperimentalRegExp::CanBeHandled(RegExpTree* tree, RegExpFlags flags,
int capture_count) {
DCHECK(FLAG_enable_experimental_regexp_engine ||
FLAG_enable_experimental_regexp_engine_on_excessive_backtracks);
@@ -22,16 +22,16 @@ bool ExperimentalRegExp::CanBeHandled(RegExpTree* tree, JSRegExp::Flags flags,
}
void ExperimentalRegExp::Initialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> source,
- JSRegExp::Flags flags, int capture_count) {
+ Handle<String> source, RegExpFlags flags,
+ int capture_count) {
DCHECK(FLAG_enable_experimental_regexp_engine);
if (FLAG_trace_experimental_regexp_engine) {
StdoutStream{} << "Initializing experimental regexp " << *source
<< std::endl;
}
- isolate->factory()->SetRegExpExperimentalData(re, source, flags,
- capture_count);
+ isolate->factory()->SetRegExpExperimentalData(
+ re, source, JSRegExp::AsJSRegExpFlags(flags), capture_count);
}
bool ExperimentalRegExp::IsCompiled(Handle<JSRegExp> re, Isolate* isolate) {
@@ -69,15 +69,14 @@ base::Optional<CompilationResult> CompileImpl(Isolate* isolate,
Zone zone(isolate->allocator(), ZONE_NAME);
Handle<String> source(regexp->Pattern(), isolate);
- JSRegExp::Flags flags = regexp->GetFlags();
// Parse and compile the regexp source.
RegExpCompileData parse_result;
- FlatStringReader reader(isolate, source);
DCHECK(!isolate->has_pending_exception());
- bool parse_success =
- RegExpParser::ParseRegExp(isolate, &zone, &reader, flags, &parse_result);
+ bool parse_success = RegExpParser::ParseRegExpFromHeapString(
+ isolate, &zone, source, JSRegExp::AsRegExpFlags(regexp->GetFlags()),
+ &parse_result);
if (!parse_success) {
// The pattern was already parsed successfully during initialization, so
// the only way parsing can fail now is because of stack overflow.
@@ -87,12 +86,13 @@ base::Optional<CompilationResult> CompileImpl(Isolate* isolate,
return base::nullopt;
}
- ZoneList<RegExpInstruction> bytecode =
- ExperimentalRegExpCompiler::Compile(parse_result.tree, flags, &zone);
+ ZoneList<RegExpInstruction> bytecode = ExperimentalRegExpCompiler::Compile(
+ parse_result.tree, JSRegExp::AsRegExpFlags(regexp->GetFlags()), &zone);
CompilationResult result;
result.bytecode = VectorToByteArray(isolate, bytecode.ToVector());
- result.capture_name_map = parse_result.capture_name_map;
+ result.capture_name_map =
+ RegExp::CreateCaptureNameMap(isolate, parse_result.named_captures);
return result;
}
diff --git a/deps/v8/src/regexp/experimental/experimental.h b/deps/v8/src/regexp/experimental/experimental.h
index 1b44100cc8..5987fb4d77 100644
--- a/deps/v8/src/regexp/experimental/experimental.h
+++ b/deps/v8/src/regexp/experimental/experimental.h
@@ -5,6 +5,7 @@
#ifndef V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_H_
#define V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_H_
+#include "src/regexp/regexp-flags.h"
#include "src/regexp/regexp.h"
namespace v8 {
@@ -19,10 +20,10 @@ class ExperimentalRegExp final : public AllStatic {
// TODO(mbid, v8:10765): This walks the RegExpTree, but it could also be
// checked on the fly in the parser. Not done currently because walking the
// AST again is more flexible and less error prone (but less performant).
- static bool CanBeHandled(RegExpTree* tree, JSRegExp::Flags flags,
+ static bool CanBeHandled(RegExpTree* tree, RegExpFlags flags,
int capture_count);
static void Initialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
int capture_count);
static bool IsCompiled(Handle<JSRegExp> re, Isolate* isolate);
V8_WARN_UNUSED_RESULT
diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
new file mode 100644
index 0000000000..d95a6e7d60
--- /dev/null
+++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
@@ -0,0 +1,1264 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/regexp/loong64/regexp-macro-assembler-loong64.h"
+
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/strings/unicode.h"
+
+namespace v8 {
+namespace internal {
+
+/* clang-format off
+ *
+ * This assembler uses the following register assignment convention
+ * - t3 : Temporarily stores the index of capture start after a matching pass
+ * for a global regexp.
+ * - a5 : Pointer to current Code object including heap object tag.
+ * - a6 : Current position in input, as negative offset from end of string.
+ * Please notice that this is the byte offset, not the character offset!
+ * - a7 : Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - t0 : Points to tip of backtrack stack
+ * - t1 : Unused.
+ * - t2 : End of input (points to byte after last character in input).
+ * - fp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - sp : Points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ *
+ * - fp[80] Isolate* isolate (address of the current isolate) kIsolate
+ * kStackFrameHeader
+ * --- sp when called ---
+ * - fp[72] ra Return from RegExp code (ra). kReturnAddress
+ * - fp[64] old-fp Old fp, callee saved.
+ * - fp[0..63] s0..s7 Callee-saved registers s0..s7.
+ * --- frame pointer ----
+ * - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
+ * - fp[-16] stack_base (Top of backtracking stack). kStackHighEnd
+ * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegisters
+ * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
+ * - fp[-40] end of input (address of end of string). kInputEnd
+ * - fp[-48] start of input (address of first character in string). kInputStart
+ * - fp[-56] start index (character index of start). kStartIndex
+ * - fp[-64] void* input_string (location of a handle containing the string). kInputString
+ * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures
+ * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOne
+ * position -1). Used to initialize capture registers to a
+ * non-position.
+ * --------- The following output registers are 32-bit values. ---------
+ * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZero
+ * - register 1 num_saved_registers_ registers)
+ * - ...
+ * - register num_registers-1
+ * --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String input_string,
+ * int start_index,
+ * Address start,
+ * Address end,
+ * int* capture_output_array,
+ * int num_capture_registers,
+ * byte* stack_area_base,
+ * bool direct_call = false,
+ * Isolate* isolate);
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
+ *
+ * clang-format on
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+const int RegExpMacroAssemblerLOONG64::kRegExpCodeSize;
+
+RegExpMacroAssemblerLOONG64::RegExpMacroAssemblerLOONG64(Isolate* isolate,
+ Zone* zone, Mode mode,
+ int registers_to_save)
+ : NativeRegExpMacroAssembler(isolate, zone),
+ masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_(),
+ internal_failure_label_() {
+ masm_->set_root_array_available(false);
+
+ DCHECK_EQ(0, registers_to_save % 2);
+ __ jmp(&entry_label_); // We'll write the entry code later.
+ // If the code gets too big or corrupted, an internal exception will be
+ // raised, and we will exit right away.
+ __ bind(&internal_failure_label_);
+ __ li(a0, Operand(FAILURE));
+ __ Ret();
+ __ bind(&start_label_); // And then continue from here.
+}
+
+RegExpMacroAssemblerLOONG64::~RegExpMacroAssemblerLOONG64() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+ internal_failure_label_.Unuse();
+ fallback_label_.Unuse();
+}
+
+int RegExpMacroAssemblerLOONG64::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+void RegExpMacroAssemblerLOONG64::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ Add_d(current_input_offset(), current_input_offset(),
+ Operand(by * char_size()));
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::AdvanceRegister(int reg, int by) {
+ DCHECK_LE(0, reg);
+ DCHECK_GT(num_registers_, reg);
+ if (by != 0) {
+ __ Ld_d(a0, register_location(reg));
+ __ Add_d(a0, a0, Operand(by));
+ __ St_d(a0, register_location(reg));
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::Backtrack() {
+ CheckPreemption();
+ if (has_backtrack_limit()) {
+ Label next;
+ __ Ld_d(a0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ Add_d(a0, a0, Operand(1));
+ __ St_d(a0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ Branch(&next, ne, a0, Operand(backtrack_limit()));
+
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+ // Can't fallback, so we treat it as a failed match.
+ Fail();
+ }
+
+ __ bind(&next);
+ }
+ // Pop Code offset from backtrack stack, add Code and jump to location.
+ Pop(a0);
+ __ Add_d(a0, a0, code_pointer());
+ __ Jump(a0);
+}
+
+void RegExpMacroAssemblerLOONG64::Bind(Label* label) { __ bind(label); }
+
+void RegExpMacroAssemblerLOONG64::CheckCharacter(uint32_t c, Label* on_equal) {
+ BranchOrBacktrack(on_equal, eq, current_character(), Operand(c));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterGT(base::uc16 limit,
+ Label* on_greater) {
+ BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckAtStart(int cp_offset,
+ Label* on_at_start) {
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(a0, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
+ BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(a0, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
+ BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterLT(base::uc16 limit,
+ Label* on_less) {
+ BranchOrBacktrack(on_less, lt, current_character(), Operand(limit));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckGreedyLoop(Label* on_equal) {
+ Label backtrack_non_equal;
+ __ Ld_w(a0, MemOperand(backtrack_stackpointer(), 0));
+ __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(),
+ Operand(kIntSize));
+ __ bind(&backtrack_non_equal);
+ BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotBackReferenceIgnoreCase(
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ Label fallthrough;
+ __ Ld_d(a0, register_location(start_reg)); // Index of start of capture.
+ __ Ld_d(a1, register_location(start_reg + 1)); // Index of end of capture.
+ __ Sub_d(a1, a1, a0); // Length of capture.
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+ if (read_backward) {
+ __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(t1, t1, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
+ } else {
+ __ Add_d(t1, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ }
+
+ if (mode_ == LATIN1) {
+ Label success;
+ Label fail;
+ Label loop_check;
+
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
+ __ Add_d(a0, a0, Operand(end_of_input_address()));
+ __ Add_d(a2, end_of_input_address(), Operand(current_input_offset()));
+ if (read_backward) {
+ __ Sub_d(a2, a2, Operand(a1));
+ }
+ __ Add_d(a1, a0, Operand(a1));
+
+ // a0 - Address of start of capture.
+ // a1 - Address of end of capture.
+ // a2 - Address of current input position.
+
+ Label loop;
+ __ bind(&loop);
+ __ Ld_bu(a3, MemOperand(a0, 0));
+ __ addi_d(a0, a0, char_size());
+ __ Ld_bu(a4, MemOperand(a2, 0));
+ __ addi_d(a2, a2, char_size());
+
+ __ Branch(&loop_check, eq, a4, Operand(a3));
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ Or(a3, a3, Operand(0x20)); // Convert capture character to lower-case.
+ __ Or(a4, a4, Operand(0x20)); // Also convert input character.
+ __ Branch(&fail, ne, a4, Operand(a3));
+ __ Sub_d(a3, a3, Operand('a'));
+ __ Branch(&loop_check, ls, a3, Operand('z' - 'a'));
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ Sub_d(a3, a3, Operand(224 - 'a'));
+ // Weren't Latin-1 letters.
+ __ Branch(&fail, hi, a3, Operand(254 - 224));
+ // Check for 247.
+ __ Branch(&fail, eq, a3, Operand(247 - 224));
+
+ __ bind(&loop_check);
+ __ Branch(&loop, lt, a0, Operand(a1));
+ __ jmp(&success);
+
+ __ bind(&fail);
+ GoTo(on_no_match);
+
+ __ bind(&success);
+ // Compute new value of character position after the matched part.
+ __ Sub_d(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ Ld_d(t1, register_location(start_reg)); // Index of start of capture.
+ __ Ld_d(a2,
+ register_location(start_reg + 1)); // Index of end of capture.
+ __ Add_d(current_input_offset(), current_input_offset(), Operand(t1));
+ __ Sub_d(current_input_offset(), current_input_offset(), Operand(a2));
+ }
+ } else {
+ DCHECK(mode_ == UC16);
+ // Put regexp engine registers on stack.
+ RegList regexp_registers_to_retain = current_input_offset().bit() |
+ current_character().bit() |
+ backtrack_stackpointer().bit();
+ __ MultiPush(regexp_registers_to_retain);
+
+ int argument_count = 4;
+ __ PrepareCallCFunction(argument_count, a2);
+
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
+
+ // Put arguments into arguments registers.
+ // Parameters are
+ // a0: Address byte_offset1 - Address captured substring's start.
+ // a1: Address byte_offset2 - Address of current character position.
+ // a2: size_t byte_length - length of capture in bytes(!).
+ // a3: Isolate* isolate.
+
+ // Address of start of capture.
+ __ Add_d(a0, a0, Operand(end_of_input_address()));
+ // Length of capture.
+ __ mov(a2, a1);
+ // Save length in callee-save register for use on return.
+ __ mov(s3, a1);
+ // Address of current input position.
+ __ Add_d(a1, current_input_offset(), Operand(end_of_input_address()));
+ if (read_backward) {
+ __ Sub_d(a1, a1, Operand(s3));
+ }
+ // Isolate.
+ __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ unicode ? ExternalReference::re_case_insensitive_compare_unicode(
+ isolate())
+ : ExternalReference::re_case_insensitive_compare_non_unicode(
+ isolate());
+ __ CallCFunction(function, argument_count);
+ }
+
+ // Restore regexp engine registers.
+ __ MultiPop(regexp_registers_to_retain);
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+
+ // Check if function returned non-zero for success or zero for failure.
+ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+ // On success, increment position by length of capture.
+ if (read_backward) {
+ __ Sub_d(current_input_offset(), current_input_offset(), Operand(s3));
+ } else {
+ __ Add_d(current_input_offset(), current_input_offset(), Operand(s3));
+ }
+ }
+
+ __ bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
+ Label fallthrough;
+
+ // Find length of back-referenced capture.
+ __ Ld_d(a0, register_location(start_reg));
+ __ Ld_d(a1, register_location(start_reg + 1));
+ __ Sub_d(a1, a1, a0); // Length to check.
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+ if (read_backward) {
+ __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(t1, t1, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
+ } else {
+ __ Add_d(t1, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ }
+
+ // Compute pointers to match string and capture string.
+ __ Add_d(a0, a0, Operand(end_of_input_address()));
+ __ Add_d(a2, end_of_input_address(), Operand(current_input_offset()));
+ if (read_backward) {
+ __ Sub_d(a2, a2, Operand(a1));
+ }
+ __ Add_d(a1, a1, Operand(a0));
+
+ Label loop;
+ __ bind(&loop);
+ if (mode_ == LATIN1) {
+ __ Ld_bu(a3, MemOperand(a0, 0));
+ __ addi_d(a0, a0, char_size());
+ __ Ld_bu(a4, MemOperand(a2, 0));
+ __ addi_d(a2, a2, char_size());
+ } else {
+ DCHECK(mode_ == UC16);
+ __ Ld_hu(a3, MemOperand(a0, 0));
+ __ addi_d(a0, a0, char_size());
+ __ Ld_hu(a4, MemOperand(a2, 0));
+ __ addi_d(a2, a2, char_size());
+ }
+ BranchOrBacktrack(on_no_match, ne, a3, Operand(a4));
+ __ Branch(&loop, lt, a0, Operand(a1));
+
+ // Move current character position to position after match.
+ __ Sub_d(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ Ld_d(t1, register_location(start_reg)); // Index of start of capture.
+ __ Ld_d(a2, register_location(start_reg + 1)); // Index of end of capture.
+ __ Add_d(current_input_offset(), current_input_offset(), Operand(t1));
+ __ Sub_d(current_input_offset(), current_input_offset(), Operand(a2));
+ }
+ __ bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotCharacter(uint32_t c,
+ Label* on_not_equal) {
+ BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ __ And(a0, current_character(), Operand(mask));
+ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
+ BranchOrBacktrack(on_equal, eq, a0, rhs);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotCharacterAfterAnd(
+ uint32_t c, uint32_t mask, Label* on_not_equal) {
+ __ And(a0, current_character(), Operand(mask));
+ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
+ BranchOrBacktrack(on_not_equal, ne, a0, rhs);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotCharacterAfterMinusAnd(
+ base::uc16 c, base::uc16 minus, base::uc16 mask, Label* on_not_equal) {
+ DCHECK_GT(String::kMaxUtf16CodeUnit, minus);
+ __ Sub_d(a0, current_character(), Operand(minus));
+ __ And(a0, a0, Operand(mask));
+ BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterInRange(base::uc16 from,
+ base::uc16 to,
+ Label* on_in_range) {
+ __ Sub_d(a0, current_character(), Operand(from));
+ // Unsigned lower-or-same condition.
+ BranchOrBacktrack(on_in_range, ls, a0, Operand(to - from));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterNotInRange(
+ base::uc16 from, base::uc16 to, Label* on_not_in_range) {
+ __ Sub_d(a0, current_character(), Operand(from));
+ // Unsigned higher condition.
+ BranchOrBacktrack(on_not_in_range, hi, a0, Operand(to - from));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckBitInTable(Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ li(a0, Operand(table));
+ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
+ __ And(a1, current_character(), Operand(kTableSize - 1));
+ __ Add_d(a0, a0, a1);
+ } else {
+ __ Add_d(a0, a0, current_character());
+ }
+
+ __ Ld_bu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize));
+ BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg));
+}
+
+bool RegExpMacroAssemblerLOONG64::CheckSpecialCharacterClass(
+ base::uc16 type, Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check.
+ switch (type) {
+ case 's':
+ // Match space-characters.
+ if (mode_ == LATIN1) {
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+ Label success;
+ __ Branch(&success, eq, current_character(), Operand(' '));
+ // Check range 0x09..0x0D.
+ __ Sub_d(a0, current_character(), Operand('\t'));
+ __ Branch(&success, ls, a0, Operand('\r' - '\t'));
+ // \u00a0 (NBSP).
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00A0 - '\t'));
+ __ bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // The emitted code for generic character classes is good enough.
+ return false;
+ case 'd':
+ // Match Latin1 digits ('0'..'9').
+ __ Sub_d(a0, current_character(), Operand('0'));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
+ return true;
+ case 'D':
+ // Match non Latin1-digits.
+ __ Sub_d(a0, current_character(), Operand('0'));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
+ __ Xor(a0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Sub_d(a0, a0, Operand(0x0B));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0C - 0x0B));
+ if (mode_ == UC16) {
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Sub_d(a0, a0, Operand(0x2028 - 0x0B));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
+ }
+ return true;
+ }
+ case 'n': {
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
+ __ Xor(a0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Sub_d(a0, a0, Operand(0x0B));
+ if (mode_ == LATIN1) {
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0C - 0x0B));
+ } else {
+ Label done;
+ BranchOrBacktrack(&done, ls, a0, Operand(0x0C - 0x0B));
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Sub_d(a0, a0, Operand(0x2028 - 0x0B));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
+ __ bind(&done);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
+ BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
+ }
+ ExternalReference map =
+ ExternalReference::re_word_character_map(isolate());
+ __ li(a0, Operand(map));
+ __ Add_d(a0, a0, current_character());
+ __ Ld_bu(a0, MemOperand(a0, 0));
+ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
+ __ Branch(&done, hi, current_character(), Operand('z'));
+ }
+ ExternalReference map =
+ ExternalReference::re_word_character_map(isolate());
+ __ li(a0, Operand(map));
+ __ Add_d(a0, a0, current_character());
+ __ Ld_bu(a0, MemOperand(a0, 0));
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
+ if (mode_ != LATIN1) {
+ __ bind(&done);
+ }
+ return true;
+ }
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::Fail() {
+ __ li(a0, Operand(FAILURE));
+ __ jmp(&exit_label_);
+}
+
+Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
+ Label return_v0;
+ if (0 /* todo masm_->has_exception()*/) {
+ // If the code gets corrupted due to long regular expressions and lack of
+ // space on trampolines, an internal exception flag is set. If this case
+ // is detected, we will jump into exit sequence right away.
+ //__ bind_to(&entry_label_, internal_failure_label_.pos());
+ } else {
+ // Finalize code - write the entry point code now we know how many
+ // registers we need.
+
+ // Entry code:
+ __ bind(&entry_label_);
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL,
+    // no code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
+ // Push arguments
+ // Save callee-save registers.
+ // Start new stack frame.
+ // Store link register in existing stack-cell.
+ // Order here should correspond to order of offset constants in header file.
+ // TODO(plind): we save s0..s7, but ONLY use s3 here - use the regs
+    // or don't save.
+ RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() | s3.bit() |
+ s4.bit() | s5.bit() | s6.bit() | s7.bit();
+ RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+
+ argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit();
+
+ __ MultiPush(ra.bit(), fp.bit(), argument_registers | registers_to_retain);
+ // Set frame pointer in space for it if this is not a direct call
+ // from generated code.
+ // TODO(plind): this 8 is the # of argument regs, should have definition.
+ __ Add_d(frame_pointer(), sp, Operand(8 * kPointerSize));
+ STATIC_ASSERT(kSuccessfulCaptures == kInputString - kSystemPointerSize);
+ __ mov(a0, zero_reg);
+ __ Push(a0); // Make room for success counter and initialize it to 0.
+ STATIC_ASSERT(kStringStartMinusOne ==
+ kSuccessfulCaptures - kSystemPointerSize);
+ __ Push(a0); // Make room for "string start - 1" constant.
+ STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
+ __ Push(a0); // The backtrack counter
+
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ Ld_d(a0, MemOperand(a0, 0));
+ __ Sub_d(a0, sp, a0);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ li(a0, Operand(EXCEPTION));
+ __ jmp(&return_v0);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(a0);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ Branch(&return_v0, ne, a0, Operand(zero_reg));
+
+ __ bind(&stack_ok);
+ // Allocate space on stack for registers.
+ __ Sub_d(sp, sp, Operand(num_registers_ * kPointerSize));
+ // Load string end.
+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ // Load input start.
+ __ Ld_d(a0, MemOperand(frame_pointer(), kInputStart));
+ // Find negative length (offset of start relative to end).
+ __ Sub_d(current_input_offset(), a0, end_of_input_address());
+ // Set a0 to address of char before start of the input string
+ // (effectively string position -1).
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStartIndex));
+ __ Sub_d(a0, current_input_offset(), Operand(char_size()));
+ __ slli_d(t1, a1, (mode_ == UC16) ? 1 : 0);
+ __ Sub_d(a0, a0, t1);
+ // Store this value in a local variable, for use when clearing
+ // position registers.
+ __ St_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+
+ // Initialize code pointer register
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
+ __ li(current_character(), Operand('\n'));
+ __ jmp(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
+ // Fill saved registers with initial value = start offset - 1.
+ if (num_saved_registers_ > 8) {
+ // Address of register 0.
+ __ Add_d(a1, frame_pointer(), Operand(kRegisterZero));
+ __ li(a2, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ St_d(a0, MemOperand(a1, 0));
+ __ Add_d(a1, a1, Operand(-kPointerSize));
+ __ Sub_d(a2, a2, Operand(1));
+ __ Branch(&init_loop, ne, a2, Operand(zero_reg));
+ } else {
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ St_d(a0, register_location(i));
+ }
+ }
+ }
+
+ // Initialize backtrack stack pointer.
+ __ Ld_d(backtrack_stackpointer(),
+ MemOperand(frame_pointer(), kStackHighEnd));
+
+ __ jmp(&start_label_);
+
+ // Exit code:
+ if (success_label_.is_linked()) {
+ // Save captures when successful.
+ __ bind(&success_label_);
+ if (num_saved_registers_ > 0) {
+ // Copy captures to output.
+ __ Ld_d(a1, MemOperand(frame_pointer(), kInputStart));
+ __ Ld_d(a0, MemOperand(frame_pointer(), kRegisterOutput));
+ __ Ld_d(a2, MemOperand(frame_pointer(), kStartIndex));
+ __ Sub_d(a1, end_of_input_address(), a1);
+ // a1 is length of input in bytes.
+ if (mode_ == UC16) {
+ __ srli_d(a1, a1, 1);
+ }
+ // a1 is length of input in characters.
+ __ Add_d(a1, a1, Operand(a2));
+ // a1 is length of string in characters.
+
+ DCHECK_EQ(0, num_saved_registers_ % 2);
+ // Always an even number of capture registers. This allows us to
+ // unroll the loop once to add an operation between a load of a register
+ // and the following use of that register.
+ for (int i = 0; i < num_saved_registers_; i += 2) {
+ __ Ld_d(a2, register_location(i));
+ __ Ld_d(a3, register_location(i + 1));
+ if (i == 0 && global_with_zero_length_check()) {
+ // Keep capture start in a4 for the zero-length check later.
+ __ mov(t3, a2);
+ }
+ if (mode_ == UC16) {
+ __ srai_d(a2, a2, 1);
+ __ Add_d(a2, a2, a1);
+ __ srai_d(a3, a3, 1);
+ __ Add_d(a3, a3, a1);
+ } else {
+ __ Add_d(a2, a1, Operand(a2));
+ __ Add_d(a3, a1, Operand(a3));
+ }
+ // V8 expects the output to be an int32_t array.
+ __ St_w(a2, MemOperand(a0, 0));
+ __ Add_d(a0, a0, kIntSize);
+ __ St_w(a3, MemOperand(a0, 0));
+ __ Add_d(a0, a0, kIntSize);
+ }
+ }
+
+ if (global()) {
+ // Restart matching if the regular expression is flagged as global.
+ __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ Ld_d(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ Ld_d(a2, MemOperand(frame_pointer(), kRegisterOutput));
+ // Increment success counter.
+ __ Add_d(a0, a0, 1);
+ __ St_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ Sub_d(a1, a1, num_saved_registers_);
+ // Check whether we have enough room for another set of capture results.
+ //__ mov(v0, a0);
+ __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
+
+ __ St_d(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ // Advance the location for output.
+ __ Add_d(a2, a2, num_saved_registers_ * kIntSize);
+ __ St_d(a2, MemOperand(frame_pointer(), kRegisterOutput));
+
+ // Prepare a0 to initialize registers with its value in the next run.
+ __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // t3: capture start index
+ // Not a zero-length match, restart.
+ __ Branch(&load_char_start_regexp, ne, current_input_offset(),
+ Operand(t3));
+ // Offset from the end is zero if we already reached the end.
+ __ Branch(&exit_label_, eq, current_input_offset(),
+ Operand(zero_reg));
+ // Advance current position after a zero-length match.
+ Label advance;
+ __ bind(&advance);
+ __ Add_d(current_input_offset(), current_input_offset(),
+ Operand((mode_ == UC16) ? 2 : 1));
+ if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
+ }
+
+ __ Branch(&load_char_start_regexp);
+ } else {
+ __ li(a0, Operand(SUCCESS));
+ }
+ }
+ // Exit and return v0.
+ __ bind(&exit_label_);
+ if (global()) {
+ __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ }
+
+ __ bind(&return_v0);
+  // Skip sp past regexp registers and local variables.
+ __ mov(sp, frame_pointer());
+ // Restore registers s0..s7 and return (restoring ra to pc).
+ __ MultiPop(ra.bit(), fp.bit(), registers_to_retain);
+ __ Ret();
+
+ // Backtrack code (branch target for conditional backtracks).
+ if (backtrack_label_.is_linked()) {
+ __ bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ Label exit_with_exception;
+
+ // Preempt-code.
+ if (check_preempt_label_.is_linked()) {
+ SafeCallTarget(&check_preempt_label_);
+ // Put regexp engine registers on stack.
+ RegList regexp_registers_to_retain = current_input_offset().bit() |
+ current_character().bit() |
+ backtrack_stackpointer().bit();
+ __ MultiPush(regexp_registers_to_retain);
+ CallCheckStackGuardState(a0);
+ __ MultiPop(regexp_registers_to_retain);
+ // If returning non-zero, we should end execution with the given
+ // result as return value.
+ __ Branch(&return_v0, ne, a0, Operand(zero_reg));
+
+ // String might have moved: Reload end of string from frame.
+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ SafeReturn();
+ }
+
+ // Backtrack stack overflow code.
+ if (stack_overflow_label_.is_linked()) {
+ SafeCallTarget(&stack_overflow_label_);
+ // Reached if the backtrack-stack limit has been hit.
+ // Put regexp engine registers on stack first.
+ RegList regexp_registers =
+ current_input_offset().bit() | current_character().bit();
+ __ MultiPush(regexp_registers);
+
+    // Call GrowStack(backtrack_stackpointer(), &stack_base, isolate).
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, a0);
+ __ mov(a0, backtrack_stackpointer());
+ __ Add_d(a1, frame_pointer(), Operand(kStackHighEnd));
+ __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(masm_->isolate());
+ __ CallCFunction(grow_stack, num_arguments);
+ // Restore regexp registers.
+ __ MultiPop(regexp_registers);
+    // If nullptr is returned, we have failed to grow the stack, and
+    // must exit with a stack-overflow exception.
+ __ Branch(&exit_with_exception, eq, a0, Operand(zero_reg));
+ // Otherwise use return value as new stack pointer.
+ __ mov(backtrack_stackpointer(), a0);
+ // Restore saved registers and continue.
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ SafeReturn();
+ }
+
+ if (exit_with_exception.is_linked()) {
+    // Reached if any of the code above needed to exit with an exception.
+ __ bind(&exit_with_exception);
+ // Exit with Result EXCEPTION(-1) to signal thrown exception.
+ __ li(a0, Operand(EXCEPTION));
+ __ jmp(&return_v0);
+ }
+
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ li(a0, Operand(FALLBACK_TO_EXPERIMENTAL));
+ __ jmp(&return_v0);
+ }
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(isolate(), &code_desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate(), code_desc, CodeKind::REGEXP)
+ .set_self_reference(masm_->CodeObject())
+ .Build();
+ LOG(masm_->isolate(),
+ RegExpCodeCreateEvent(Handle<AbstractCode>::cast(code), source));
+ return Handle<HeapObject>::cast(code);
+}
+
+void RegExpMacroAssemblerLOONG64::GoTo(Label* to) {
+ if (to == nullptr) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+}
+
+void RegExpMacroAssemblerLOONG64::IfRegisterGE(int reg, int comparand,
+ Label* if_ge) {
+ __ Ld_d(a0, register_location(reg));
+ BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
+}
+
+void RegExpMacroAssemblerLOONG64::IfRegisterLT(int reg, int comparand,
+ Label* if_lt) {
+ __ Ld_d(a0, register_location(reg));
+ BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
+}
+
+void RegExpMacroAssemblerLOONG64::IfRegisterEqPos(int reg, Label* if_eq) {
+ __ Ld_d(a0, register_location(reg));
+ BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
+}
+
+RegExpMacroAssembler::IrregexpImplementation
+RegExpMacroAssemblerLOONG64::Implementation() {
+ return kLOONG64Implementation;
+}
+
+void RegExpMacroAssemblerLOONG64::PopCurrentPosition() {
+ Pop(current_input_offset());
+}
+
+void RegExpMacroAssemblerLOONG64::PopRegister(int register_index) {
+ Pop(a0);
+ __ St_d(a0, register_location(register_index));
+}
+
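+// Pushes a backtrack target onto the backtrack stack as an offset from the
+// start of the Code object. For an unbound label, a placeholder word is
+// emitted inline in the instruction stream (skipped over by a branch) and
+// recorded via label_at_put so it is patched with the label's position once
+// the label is bound; at run time that word is loaded and pushed.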
+void RegExpMacroAssemblerLOONG64::PushBacktrack(Label* label) {
+ if (label->is_bound()) {
+ int target = label->pos();
+ __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Label after_constant;
+ __ Branch(&after_constant);
+ int offset = masm_->pc_offset();
+ int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
+ __ nop();
+ masm_->label_at_put(label, offset);
+ __ bind(&after_constant);
+ if (is_int12(cp_offset)) {
+ __ Ld_wu(a0, MemOperand(code_pointer(), cp_offset));
+ } else {
+ __ Add_d(a0, code_pointer(), cp_offset);
+ __ Ld_wu(a0, MemOperand(a0, 0));
+ }
+ }
+ Push(a0);
+ CheckStackLimit();
+}
+
+void RegExpMacroAssemblerLOONG64::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+void RegExpMacroAssemblerLOONG64::PushRegister(
+ int register_index, StackCheckFlag check_stack_limit) {
+ __ Ld_d(a0, register_location(register_index));
+ Push(a0);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+void RegExpMacroAssemblerLOONG64::ReadCurrentPositionFromRegister(int reg) {
+ __ Ld_d(current_input_offset(), register_location(reg));
+}
+
+void RegExpMacroAssemblerLOONG64::ReadStackPointerFromRegister(int reg) {
+ __ Ld_d(backtrack_stackpointer(), register_location(reg));
+ __ Ld_d(a0, MemOperand(frame_pointer(), kStackHighEnd));
+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
+}
+
+void RegExpMacroAssemblerLOONG64::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ Branch(&after_position, ge, current_input_offset(),
+ Operand(-by * char_size()));
+ __ li(current_input_offset(), -by * char_size());
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&after_position);
+}
+
+void RegExpMacroAssemblerLOONG64::SetRegister(int register_index, int to) {
+ DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
+ __ li(a0, Operand(to));
+ __ St_d(a0, register_location(register_index));
+}
+
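+// Jumps to the success handler. The return value indicates whether matching
+// will be restarted (true for global regexps).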
+bool RegExpMacroAssemblerLOONG64::Succeed() {
+ __ jmp(&success_label_);
+ return global();
+}
+
+void RegExpMacroAssemblerLOONG64::WriteCurrentPositionToRegister(
+ int reg, int cp_offset) {
+ if (cp_offset == 0) {
+ __ St_d(current_input_offset(), register_location(reg));
+ } else {
+ __ Add_d(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ __ St_d(a0, register_location(reg));
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::ClearRegisters(int reg_from, int reg_to) {
+ DCHECK(reg_from <= reg_to);
+ __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ for (int reg = reg_from; reg <= reg_to; reg++) {
+ __ St_d(a0, register_location(reg));
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::WriteStackPointerToRegister(int reg) {
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStackHighEnd));
+ __ Sub_d(a0, backtrack_stackpointer(), a1);
+ __ St_d(a0, register_location(reg));
+}
+
+// Private methods:
+
+void RegExpMacroAssemblerLOONG64::CallCheckStackGuardState(Register scratch) {
+ DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins());
+ DCHECK(!masm_->options().isolate_independent_code);
+
+ int stack_alignment = base::OS::ActivationFrameAlignment();
+
+ // Align the stack pointer and save the original sp value on the stack.
+ __ mov(scratch, sp);
+ __ Sub_d(sp, sp, Operand(kPointerSize));
+ DCHECK(base::bits::IsPowerOfTwo(stack_alignment));
+ __ And(sp, sp, Operand(-stack_alignment));
+ __ St_d(scratch, MemOperand(sp, 0));
+
+ __ mov(a2, frame_pointer());
+ // Code of self.
+ __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+ // We need to make room for the return address on the stack.
+ DCHECK(IsAligned(stack_alignment, kPointerSize));
+ __ Sub_d(sp, sp, Operand(stack_alignment));
+
+  // The stack pointer now points to the cell where the return address will be
+  // written. Arguments are passed in registers. Since DirectCEntry will handle
+  // allocating space for the C argument slots, we don't need to care about
+  // that here. This is how the stack will look (sp meaning the value of sp at
+  // this moment):
+ // [sp + 3] - empty slot if needed for alignment.
+ // [sp + 2] - saved sp.
+ // [sp + 1] - second word reserved for return value.
+ // [sp + 0] - first word reserved for return value.
+
+ // a0 will point to the return address, placed by DirectCEntry.
+ __ mov(a0, sp);
+
+ ExternalReference stack_guard_check =
+ ExternalReference::re_check_stack_guard_state(masm_->isolate());
+ __ li(t7, Operand(stack_guard_check));
+
+ EmbeddedData d = EmbeddedData::FromBlob();
+ CHECK(Builtins::IsIsolateIndependent(Builtin::kDirectCEntry));
+ Address entry = d.InstructionStartOfBuiltin(Builtin::kDirectCEntry);
+ __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ __ Call(kScratchReg);
+
+  // DirectCEntry allocated space for the C argument slots, so we have to
+  // drop them, together with the return address, by loading the saved sp.
+  // At this point the stack must look like this:
+ // [sp + 7] - empty slot if needed for alignment.
+ // [sp + 6] - saved sp.
+ // [sp + 5] - second word reserved for return value.
+ // [sp + 4] - first word reserved for return value.
+ // [sp + 3] - C argument slot.
+ // [sp + 2] - C argument slot.
+ // [sp + 1] - C argument slot.
+ // [sp + 0] - C argument slot.
+ __ Ld_d(sp, MemOperand(sp, stack_alignment));
+
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T&>(Memory<int32_t>(re_frame + frame_offset));
+}
+
+template <typename T>
+static T* frame_entry_address(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T*>(re_frame + frame_offset);
+}
+
+int64_t RegExpMacroAssemblerLOONG64::CheckStackGuardState(
+ Address* return_address, Address raw_code, Address re_frame) {
+ Code re_code = Code::cast(Object(raw_code));
+ return NativeRegExpMacroAssembler::CheckStackGuardState(
+ frame_entry<Isolate*>(re_frame, kIsolate),
+ static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndex)),
+ static_cast<RegExp::CallOrigin>(
+ frame_entry<int64_t>(re_frame, kDirectCall)),
+ return_address, re_code,
+ frame_entry_address<Address>(re_frame, kInputString),
+ frame_entry_address<const byte*>(re_frame, kInputStart),
+ frame_entry_address<const byte*>(re_frame, kInputEnd));
+}
+
+MemOperand RegExpMacroAssemblerLOONG64::register_location(int register_index) {
+ DCHECK(register_index < (1 << 30));
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ return MemOperand(frame_pointer(),
+ kRegisterZero - register_index * kPointerSize);
+}
+
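+// current_input_offset() is a non-positive byte offset from the end of the
+// input, so a forward cp_offset is still inside the string as long as the sum
+// stays negative; backward offsets are additionally checked against the start
+// of the string (kStringStartMinusOne).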
+void RegExpMacroAssemblerLOONG64::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ if (cp_offset >= 0) {
+ BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
+ Operand(-cp_offset * char_size()));
+ } else {
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::BranchOrBacktrack(Label* to,
+ Condition condition,
+ Register rs,
+ const Operand& rt) {
+ if (condition == al) { // Unconditional.
+ if (to == nullptr) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+ }
+ if (to == nullptr) {
+ __ Branch(&backtrack_label_, condition, rs, rt);
+ return;
+ }
+ __ Branch(to, condition, rs, rt);
+}
+
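+// The Safe* helpers implement calls and returns inside the generated code
+// without leaving absolute code addresses on the stack (which would not be
+// GC-safe): SafeCallTarget converts ra into an offset from the Code object
+// before pushing it, and SafeReturn adds the (possibly relocated) Code object
+// address back before jumping.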
+void RegExpMacroAssemblerLOONG64::SafeCall(Label* to, Condition cond,
+ Register rs, const Operand& rt) {
+ __ Branch(to, cond, rs, rt, true);
+}
+
+void RegExpMacroAssemblerLOONG64::SafeReturn() {
+ __ Pop(ra);
+ __ Add_d(t1, ra, Operand(masm_->CodeObject()));
+ __ Jump(t1);
+}
+
+void RegExpMacroAssemblerLOONG64::SafeCallTarget(Label* name) {
+ __ bind(name);
+ __ Sub_d(ra, ra, Operand(masm_->CodeObject()));
+ __ Push(ra);
+}
+
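+// The backtrack stack grows downwards and holds 32-bit entries (code offsets
+// and input positions), hence the kIntSize adjustments and the 32-bit
+// St_w/Ld_w accesses below.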
+void RegExpMacroAssemblerLOONG64::Push(Register source) {
+ DCHECK(source != backtrack_stackpointer());
+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(),
+ Operand(-kIntSize));
+ __ St_w(source, MemOperand(backtrack_stackpointer(), 0));
+}
+
+void RegExpMacroAssemblerLOONG64::Pop(Register target) {
+ DCHECK(target != backtrack_stackpointer());
+ __ Ld_w(target, MemOperand(backtrack_stackpointer(), 0));
+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckPreemption() {
+ // Check for preemption.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ Ld_d(a0, MemOperand(a0, 0));
+ SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckStackLimit() {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit_address(
+ masm_->isolate());
+
+ __ li(a0, Operand(stack_limit));
+ __ Ld_d(a0, MemOperand(a0, 0));
+ SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
+}
+
+void RegExpMacroAssemblerLOONG64::LoadCurrentCharacterUnchecked(
+ int cp_offset, int characters) {
+ Register offset = current_input_offset();
+
+  // If unaligned loads/stores are not supported, this function must only
+  // be used to load a single character at a time.
+ if (!CanReadUnaligned()) {
+ DCHECK_EQ(1, characters);
+ }
+
+ if (cp_offset != 0) {
+ // t3 is not being used to store the capture start index at this point.
+ __ Add_d(t3, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = t3;
+ }
+
+ if (mode_ == LATIN1) {
+ if (characters == 4) {
+ __ Ld_wu(current_character(), MemOperand(end_of_input_address(), offset));
+ } else if (characters == 2) {
+ __ Ld_hu(current_character(), MemOperand(end_of_input_address(), offset));
+ } else {
+ DCHECK_EQ(1, characters);
+ __ Ld_bu(current_character(), MemOperand(end_of_input_address(), offset));
+ }
+ } else {
+ DCHECK(mode_ == UC16);
+ if (characters == 2) {
+ __ Ld_wu(current_character(), MemOperand(end_of_input_address(), offset));
+ } else {
+ DCHECK_EQ(1, characters);
+ __ Ld_hu(current_character(), MemOperand(end_of_input_address(), offset));
+ }
+ }
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
new file mode 100644
index 0000000000..ea567543db
--- /dev/null
+++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
@@ -0,0 +1,214 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
+#define V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
+
+#include "src/base/strings.h"
+#include "src/codegen/loong64/assembler-loong64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/regexp/regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class V8_EXPORT_PRIVATE RegExpMacroAssemblerLOONG64
+ : public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerLOONG64(Isolate* isolate, Zone* zone, Mode mode,
+ int registers_to_save);
+ virtual ~RegExpMacroAssemblerLOONG64();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(int cp_offset, Label* on_at_start);
+ virtual void CheckCharacter(uint32_t c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(base::uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(base::uc16 limit, Label* on_less);
+  // A "greedy loop" is a loop that is both greedy and has a simple
+  // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward, bool unicode,
+ Label* on_no_match);
+ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(uint32_t c, uint32_t mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(base::uc16 c, base::uc16 minus,
+ base::uc16 mask,
+ Label* on_not_equal);
+ virtual void CheckCharacterInRange(base::uc16 from, base::uc16 to,
+ Label* on_in_range);
+ virtual void CheckCharacterNotInRange(base::uc16 from, base::uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(base::uc16 type, Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacterUnchecked(int cp_offset,
+ int character_count);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual bool Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ // {raw_code} is an Address because this is called via ExternalReference.
+ static int64_t CheckStackGuardState(Address* return_address, Address raw_code,
+ Address re_frame);
+
+ void print_regexp_frame_constants();
+
+ private:
+ // Offsets from frame_pointer() of function parameters and stored registers.
+ static const int kFramePointer = 0;
+
+ // Above the frame pointer - Stored registers and stack passed parameters.
+ // Registers s0 to s7, fp, and ra.
+ static const int kStoredRegisters = kFramePointer;
+ // Return address (stored from link register, read into pc on return).
+
+  // TODO(plind): The 9 below is the 8 s-registers (s0..s7) plus fp.
+
+ static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+ // Stack frame header.
+ static const int kStackFrameHeader = kReturnAddress;
+ // Stack parameters placed by caller.
+ static const int kIsolate = kStackFrameHeader + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kDirectCall = kFramePointer - kPointerSize;
+ static const int kStackHighEnd = kDirectCall - kPointerSize;
+ static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
+ static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
+ static const int kInputEnd = kRegisterOutput - kPointerSize;
+ static const int kInputStart = kInputEnd - kPointerSize;
+ static const int kStartIndex = kInputStart - kPointerSize;
+ static const int kInputString = kStartIndex - kPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kSuccessfulCaptures = kInputString - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
+
+ // Initial size of code buffer.
+ static const int kRegExpCodeSize = 1024;
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+  // The frame-pointer-relative location of a regexp register.
+ MemOperand register_location(int register_index);
+
+ // Register holding the current input position as negative offset from
+ // the end of the string.
+ inline Register current_input_offset() { return a6; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ inline Register current_character() { return a7; }
+
+ // Register holding address of the end of the input string.
+ inline Register end_of_input_address() { return t2; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ inline Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ inline Register backtrack_stackpointer() { return t0; }
+
+ // Register holding pointer to the current code object.
+ inline Register code_pointer() { return a5; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument).
+ inline int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is nullptr, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Label* to, Condition condition, Register rs,
+ const Operand& rt);
+
+  // Call and return internally in the generated code in a way that
+  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack).
+ inline void SafeCall(Label* to, Condition cond, Register rs,
+ const Operand& rt);
+ inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (Latin1 or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1).
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+ Label internal_failure_label_;
+ Label fallback_label_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 2b9f767c24..3f771976fd 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -6,10 +6,7 @@
#define V8_REGEXP_REGEXP_AST_H_
#include "src/base/strings.h"
-#include "src/objects/js-regexp.h"
-#include "src/objects/objects.h"
-#include "src/objects/string.h"
-#include "src/utils/utils.h"
+#include "src/regexp/regexp-flags.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone-list.h"
#include "src/zone/zone.h"
@@ -96,13 +93,14 @@ class CharacterRange {
static inline CharacterRange Singleton(base::uc32 value) {
return CharacterRange(value, value);
}
+ static constexpr int kMaxCodePoint = 0x10ffff;
static inline CharacterRange Range(base::uc32 from, base::uc32 to) {
- DCHECK(0 <= from && to <= String::kMaxCodePoint);
+ DCHECK(0 <= from && to <= kMaxCodePoint);
DCHECK(static_cast<uint32_t>(from) <= static_cast<uint32_t>(to));
return CharacterRange(from, to);
}
static inline CharacterRange Everything() {
- return CharacterRange(0, String::kMaxCodePoint);
+ return CharacterRange(0, kMaxCodePoint);
}
static inline ZoneList<CharacterRange>* List(Zone* zone,
CharacterRange range) {
@@ -566,9 +564,9 @@ class RegExpLookaround final : public RegExpTree {
class RegExpBackReference final : public RegExpTree {
public:
- explicit RegExpBackReference(JSRegExp::Flags flags)
+ explicit RegExpBackReference(RegExpFlags flags)
: capture_(nullptr), name_(nullptr), flags_(flags) {}
- RegExpBackReference(RegExpCapture* capture, JSRegExp::Flags flags)
+ RegExpBackReference(RegExpCapture* capture, RegExpFlags flags)
: capture_(capture), name_(nullptr), flags_(flags) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
@@ -587,7 +585,7 @@ class RegExpBackReference final : public RegExpTree {
private:
RegExpCapture* capture_;
const ZoneVector<base::uc16>* name_;
- const JSRegExp::Flags flags_;
+ const RegExpFlags flags_;
};
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator-inl.h b/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
index 2a6ffec929..bfdd9df93c 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
@@ -7,7 +7,6 @@
#include "src/regexp/regexp-bytecode-generator.h"
-#include "src/ast/ast.h"
#include "src/regexp/regexp-bytecodes.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.cc b/deps/v8/src/regexp/regexp-bytecode-generator.cc
index 397f4ba87a..c5ad2bfba5 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.cc
@@ -5,7 +5,7 @@
#include "src/regexp/regexp-bytecode-generator.h"
#include "src/ast/ast.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
#include "src/regexp/regexp-bytecode-generator-inl.h"
#include "src/regexp/regexp-bytecode-peephole.h"
#include "src/regexp/regexp-bytecodes.h"
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.h b/deps/v8/src/regexp/regexp-bytecode-generator.h
index 466b535c7e..310ab32cec 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.h
@@ -6,6 +6,7 @@
#define V8_REGEXP_REGEXP_BYTECODE_GENERATOR_H_
#include "src/base/strings.h"
+#include "src/codegen/label.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/regexp-bytecode-peephole.cc b/deps/v8/src/regexp/regexp-bytecode-peephole.cc
index fc64db9013..20de4565d2 100644
--- a/deps/v8/src/regexp/regexp-bytecode-peephole.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-peephole.cc
@@ -4,10 +4,8 @@
#include "src/regexp/regexp-bytecode-peephole.h"
-#include "src/execution/isolate.h"
#include "src/flags/flags.h"
-#include "src/objects/fixed-array.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
#include "src/regexp/regexp-bytecodes.h"
#include "src/utils/memcopy.h"
#include "src/utils/utils.h"
diff --git a/deps/v8/src/regexp/regexp-compiler-tonode.cc b/deps/v8/src/regexp/regexp-compiler-tonode.cc
index f668aa6d84..b80eefae6d 100644
--- a/deps/v8/src/regexp/regexp-compiler-tonode.cc
+++ b/deps/v8/src/regexp/regexp-compiler-tonode.cc
@@ -6,14 +6,12 @@
#include "src/execution/isolate.h"
#include "src/regexp/regexp.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/regexp/special-case.h"
-#endif // V8_INTL_SUPPORT
#include "src/strings/unicode-inl.h"
#include "src/zone/zone-list-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/base/strings.h"
+#include "src/regexp/special-case.h"
#include "unicode/locid.h"
#include "unicode/uniset.h"
#include "unicode/utypes.h"
@@ -24,6 +22,11 @@ namespace internal {
using namespace regexp_compiler_constants; // NOLINT(build/namespaces)
+constexpr base::uc32 kMaxCodePoint = 0x10ffff;
+constexpr int kMaxUtf16CodeUnit = 0xffff;
+constexpr uint32_t kMaxUtf16CodeUnitU = 0xffff;
+constexpr int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
+
// -------------------------------------------------------------------
// Tree to graph conversion
@@ -65,7 +68,7 @@ static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
return false;
}
}
- if (range.to() != String::kMaxCodePoint) {
+ if (range.to() != kMaxCodePoint) {
return false;
}
return true;
@@ -359,8 +362,8 @@ RegExpNode* UnanchoredAdvance(RegExpCompiler* compiler,
// we advanced into the middle of a surrogate pair, it will work out, as
// nothing will match from there. We will have to advance again, consuming
// the associated trail surrogate.
- ZoneList<CharacterRange>* range = CharacterRange::List(
- zone, CharacterRange::Range(0, String::kMaxUtf16CodeUnit));
+ ZoneList<CharacterRange>* range =
+ CharacterRange::List(zone, CharacterRange::Range(0, kMaxUtf16CodeUnit));
return TextNode::CreateForCharacterRanges(zone, range, false, on_success);
}
@@ -518,7 +521,7 @@ bool RegExpDisjunction::SortConsecutiveAtoms(RegExpCompiler* compiler) {
DCHECK_LT(first_atom, alternatives->length());
DCHECK_LE(i, alternatives->length());
DCHECK_LE(first_atom, i);
- if (IgnoreCase(compiler->flags())) {
+ if (IsIgnoreCase(compiler->flags())) {
#ifdef V8_INTL_SUPPORT
alternatives->StableSort(CompareFirstCharCaseInsensitve, first_atom,
i - first_atom);
@@ -570,14 +573,14 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
#ifdef V8_INTL_SUPPORT
icu::UnicodeString new_prefix(atom->data().at(0));
if (new_prefix != common_prefix) {
- if (!IgnoreCase(compiler->flags())) break;
+ if (!IsIgnoreCase(compiler->flags())) break;
if (common_prefix.caseCompare(new_prefix, U_FOLD_CASE_DEFAULT) != 0)
break;
}
#else
unibrow::uchar new_prefix = atom->data().at(0);
if (new_prefix != common_prefix) {
- if (!IgnoreCase(compiler->flags())) break;
+ if (!IsIgnoreCase(compiler->flags())) break;
unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
compiler->isolate()->regexp_macro_assembler_canonicalize();
new_prefix = Canonical(canonicalize, new_prefix);
@@ -658,7 +661,7 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
i++;
continue;
}
- const JSRegExp::Flags flags = compiler->flags();
+ const RegExpFlags flags = compiler->flags();
DCHECK_IMPLIES(IsUnicode(flags),
!unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
bool contains_trail_surrogate =
@@ -740,7 +743,7 @@ namespace {
RegExpNode* BoundaryAssertionAsLookaround(RegExpCompiler* compiler,
RegExpNode* on_success,
RegExpAssertion::AssertionType type,
- JSRegExp::Flags flags) {
+ RegExpFlags flags) {
CHECK(NeedsUnicodeCaseEquivalents(flags));
Zone* zone = compiler->zone();
ZoneList<CharacterRange>* word_range =
@@ -1038,7 +1041,7 @@ static void AddClassNegated(const int* elmv, int elmc,
elmc--;
DCHECK_EQ(kRangeEndMarker, elmv[elmc]);
DCHECK_NE(0x0000, elmv[0]);
- DCHECK_NE(String::kMaxCodePoint, elmv[elmc - 1]);
+ DCHECK_NE(kMaxCodePoint, elmv[elmc - 1]);
base::uc16 last = 0x0000;
for (int i = 0; i < elmc; i += 2) {
DCHECK(last <= elmv[i] - 1);
@@ -1046,7 +1049,7 @@ static void AddClassNegated(const int* elmv, int elmc,
ranges->Add(CharacterRange::Range(last, elmv[i] - 1), zone);
last = elmv[i + 1];
}
- ranges->Add(CharacterRange::Range(last, String::kMaxCodePoint), zone);
+ ranges->Add(CharacterRange::Range(last, kMaxCodePoint), zone);
}
void CharacterRange::AddClassEscape(char type, ZoneList<CharacterRange>* ranges,
@@ -1128,13 +1131,13 @@ void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
for (int i = 0; i < range_count; i++) {
CharacterRange range = ranges->at(i);
base::uc32 from = range.from();
- if (from > String::kMaxUtf16CodeUnit) continue;
- base::uc32 to = std::min({range.to(), String::kMaxUtf16CodeUnitU});
+ if (from > kMaxUtf16CodeUnit) continue;
+ base::uc32 to = std::min({range.to(), kMaxUtf16CodeUnitU});
// Nothing to be done for surrogates.
if (from >= kLeadSurrogateStart && to <= kTrailSurrogateEnd) continue;
if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
- if (from > String::kMaxOneByteCharCode) continue;
- if (to > String::kMaxOneByteCharCode) to = String::kMaxOneByteCharCode;
+ if (from > kMaxOneByteCharCode) continue;
+ if (to > kMaxOneByteCharCode) to = kMaxOneByteCharCode;
}
others.add(from, to);
}
@@ -1171,13 +1174,13 @@ void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
for (int i = 0; i < range_count; i++) {
CharacterRange range = ranges->at(i);
base::uc32 bottom = range.from();
- if (bottom > String::kMaxUtf16CodeUnit) continue;
- base::uc32 top = std::min({range.to(), String::kMaxUtf16CodeUnitU});
+ if (bottom > kMaxUtf16CodeUnit) continue;
+ base::uc32 top = std::min({range.to(), kMaxUtf16CodeUnitU});
// Nothing to be done for surrogates.
if (bottom >= kLeadSurrogateStart && top <= kTrailSurrogateEnd) continue;
if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
- if (bottom > String::kMaxOneByteCharCode) continue;
- if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
+ if (bottom > kMaxOneByteCharCode) continue;
+ if (top > kMaxOneByteCharCode) top = kMaxOneByteCharCode;
}
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
if (top == bottom) {
@@ -1389,9 +1392,8 @@ void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
from = range.to() + 1;
i++;
}
- if (from < String::kMaxCodePoint) {
- negated_ranges->Add(CharacterRange::Range(from, String::kMaxCodePoint),
- zone);
+ if (from < kMaxCodePoint) {
+ negated_ranges->Add(CharacterRange::Range(from, kMaxCodePoint), zone);
}
}
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
index 38a3d4447f..5123cd138c 100644
--- a/deps/v8/src/regexp/regexp-compiler.cc
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -6,15 +6,13 @@
#include "src/base/safe_conversions.h"
#include "src/execution/isolate.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
#include "src/regexp/regexp-macro-assembler-arch.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/regexp/special-case.h"
-#endif // V8_INTL_SUPPORT
#include "src/strings/unicode-inl.h"
#include "src/zone/zone-list-inl.h"
#ifdef V8_INTL_SUPPORT
+#include "src/regexp/special-case.h"
#include "unicode/locid.h"
#include "unicode/uniset.h"
#include "unicode/utypes.h"
@@ -240,7 +238,7 @@ class RecursionCheck {
// Attempts to compile the regexp using an Irregexp code generator. Returns
// a fixed array or a null handle depending on whether it succeeded.
RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- JSRegExp::Flags flags, bool one_byte)
+ RegExpFlags flags, bool one_byte)
: next_register_(JSRegExp::RegistersForCaptureCount(capture_count)),
unicode_lookaround_stack_register_(kNoRegister),
unicode_lookaround_position_register_(kNoRegister),
@@ -1589,7 +1587,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
QuickCheckDetails::Position* pos =
details->positions(characters_filled_in);
base::uc16 c = quarks[i];
- if (IgnoreCase(compiler->flags())) {
+ if (IsIgnoreCase(compiler->flags())) {
unibrow::uchar chars[4];
int length = GetCaseIndependentLetters(
isolate, c, compiler->one_byte(), chars, 4);
@@ -1819,7 +1817,7 @@ class IterationDecrementer {
LoopChoiceNode* node_;
};
-RegExpNode* SeqRegExpNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
+RegExpNode* SeqRegExpNode::FilterOneByte(int depth, RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
DCHECK(!info()->visited);
@@ -1827,7 +1825,7 @@ RegExpNode* SeqRegExpNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
return FilterSuccessor(depth - 1, flags);
}
-RegExpNode* SeqRegExpNode::FilterSuccessor(int depth, JSRegExp::Flags flags) {
+RegExpNode* SeqRegExpNode::FilterSuccessor(int depth, RegExpFlags flags) {
RegExpNode* next = on_success_->FilterOneByte(depth - 1, flags);
if (next == nullptr) return set_replacement(nullptr);
on_success_ = next;
@@ -1849,7 +1847,7 @@ static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) {
return false;
}
-RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
+RegExpNode* TextNode::FilterOneByte(int depth, RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
DCHECK(!info()->visited);
@@ -1861,7 +1859,7 @@ RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
base::Vector<const base::uc16> quarks = elm.atom()->data();
for (int j = 0; j < quarks.length(); j++) {
base::uc16 c = quarks[j];
- if (IgnoreCase(flags)) {
+ if (IsIgnoreCase(flags)) {
c = unibrow::Latin1::TryConvertToLatin1(c);
}
if (c > unibrow::Latin1::kMaxChar) return set_replacement(nullptr);
@@ -1880,7 +1878,7 @@ RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
if (range_count != 0 && ranges->at(0).from() == 0 &&
ranges->at(0).to() >= String::kMaxOneByteCharCode) {
// This will be handled in a later filter.
- if (IgnoreCase(flags) && RangesContainLatin1Equivalents(ranges)) {
+ if (IsIgnoreCase(flags) && RangesContainLatin1Equivalents(ranges)) {
continue;
}
return set_replacement(nullptr);
@@ -1889,7 +1887,7 @@ RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
if (range_count == 0 ||
ranges->at(0).from() > String::kMaxOneByteCharCode) {
// This will be handled in a later filter.
- if (IgnoreCase(flags) && RangesContainLatin1Equivalents(ranges)) {
+ if (IsIgnoreCase(flags) && RangesContainLatin1Equivalents(ranges)) {
continue;
}
return set_replacement(nullptr);
@@ -1900,7 +1898,7 @@ RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
return FilterSuccessor(depth - 1, flags);
}
-RegExpNode* LoopChoiceNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
+RegExpNode* LoopChoiceNode::FilterOneByte(int depth, RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -1917,7 +1915,7 @@ RegExpNode* LoopChoiceNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
return ChoiceNode::FilterOneByte(depth - 1, flags);
}
-RegExpNode* ChoiceNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
+RegExpNode* ChoiceNode::FilterOneByte(int depth, RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -1969,7 +1967,7 @@ RegExpNode* ChoiceNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
}
RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth,
- JSRegExp::Flags flags) {
+ RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -2321,13 +2319,13 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler, TextEmitPassType pass,
TextElement elm = elements()->at(i);
int cp_offset = trace->cp_offset() + elm.cp_offset() + backward_offset;
if (elm.text_type() == TextElement::ATOM) {
- if (SkipPass(pass, IgnoreCase(compiler->flags()))) continue;
+ if (SkipPass(pass, IsIgnoreCase(compiler->flags()))) continue;
base::Vector<const base::uc16> quarks = elm.atom()->data();
for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
if (first_element_checked && i == 0 && j == 0) continue;
if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
base::uc16 quark = quarks[j];
- if (IgnoreCase(compiler->flags())) {
+ if (IsIgnoreCase(compiler->flags())) {
// Everywhere else we assume that a non-Latin-1 character cannot match
          // a Latin-1 character. Avoid the cases where this assumption is
// invalid by using the Latin1 equivalent instead.
@@ -2491,8 +2489,8 @@ void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
}
void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte,
- JSRegExp::Flags flags) {
- if (!IgnoreCase(flags)) return;
+ RegExpFlags flags) {
+ if (!IsIgnoreCase(flags)) return;
#ifdef V8_INTL_SUPPORT
if (NeedsUnicodeCaseEquivalents(flags)) return;
#endif
@@ -3444,7 +3442,7 @@ void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
RecursionCheck rc(compiler);
DCHECK_EQ(start_reg_ + 1, end_reg_);
- if (IgnoreCase(flags_)) {
+ if (IsIgnoreCase(flags_)) {
bool unicode = IsUnicode(flags_);
assembler->CheckNotBackReferenceIgnoreCase(start_reg_, read_backward(),
unicode, trace->backtrack());
@@ -3634,7 +3632,7 @@ class EatsAtLeastPropagator : public AllStatic {
template <typename... Propagators>
class Analysis : public NodeVisitor {
public:
- Analysis(Isolate* isolate, bool is_one_byte, JSRegExp::Flags flags)
+ Analysis(Isolate* isolate, bool is_one_byte, RegExpFlags flags)
: isolate_(isolate),
is_one_byte_(is_one_byte),
flags_(flags),
@@ -3746,14 +3744,14 @@ class Analysis : public NodeVisitor {
private:
Isolate* isolate_;
const bool is_one_byte_;
- const JSRegExp::Flags flags_;
+ const RegExpFlags flags_;
RegExpError error_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
};
-RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte,
- JSRegExp::Flags flags, RegExpNode* node) {
+RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte, RegExpFlags flags,
+ RegExpNode* node) {
Analysis<AssertionPropagator, EatsAtLeastPropagator> analysis(
isolate, is_one_byte, flags);
DCHECK_EQ(node->info()->been_analyzed, false);
@@ -3809,7 +3807,7 @@ void TextNode::FillInBMInfo(Isolate* isolate, int initial_offset, int budget,
return;
}
base::uc16 character = atom->data()[j];
- if (IgnoreCase(bm->compiler()->flags())) {
+ if (IsIgnoreCase(bm->compiler()->flags())) {
unibrow::uchar chars[4];
int length = GetCaseIndependentLetters(
isolate, character, bm->max_char() == String::kMaxOneByteCharCode,
@@ -3874,7 +3872,7 @@ RegExpNode* RegExpCompiler::OptionallyStepBackToLeadSurrogate(
}
RegExpNode* RegExpCompiler::PreprocessRegExp(RegExpCompileData* data,
- JSRegExp::Flags flags,
+ RegExpFlags flags,
bool is_one_byte) {
// Wrap the body of the regexp in capture #0.
RegExpNode* captured_body =
diff --git a/deps/v8/src/regexp/regexp-compiler.h b/deps/v8/src/regexp/regexp-compiler.h
index 2be7a48e9a..832a966217 100644
--- a/deps/v8/src/regexp/regexp-compiler.h
+++ b/deps/v8/src/regexp/regexp-compiler.h
@@ -9,6 +9,7 @@
#include "src/base/small-vector.h"
#include "src/base/strings.h"
+#include "src/regexp/regexp-flags.h"
#include "src/regexp/regexp-nodes.h"
namespace v8 {
@@ -49,34 +50,10 @@ constexpr int kPatternTooShortForBoyerMoore = 2;
} // namespace regexp_compiler_constants
-inline bool IgnoreCase(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kIgnoreCase) != 0;
-}
-
-inline bool IsUnicode(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kUnicode) != 0;
-}
-
-inline bool IsSticky(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kSticky) != 0;
-}
-
-inline bool IsGlobal(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kGlobal) != 0;
-}
-
-inline bool DotAll(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kDotAll) != 0;
-}
-
-inline bool Multiline(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kMultiline) != 0;
-}
-
-inline bool NeedsUnicodeCaseEquivalents(JSRegExp::Flags flags) {
+inline bool NeedsUnicodeCaseEquivalents(RegExpFlags flags) {
// Both unicode and ignore_case flags are set. We need to use ICU to find
// the closure over case equivalents.
- return IsUnicode(flags) && IgnoreCase(flags);
+ return IsUnicode(flags) && IsIgnoreCase(flags);
}
// Details of a quick mask-compare check that can look ahead in the
@@ -424,8 +401,8 @@ struct PreloadState {
// Analysis performs assertion propagation and computes eats_at_least_ values.
// See the comments on AssertionPropagator and EatsAtLeastPropagator for more
// details.
-RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte,
- JSRegExp::Flags flags, RegExpNode* node);
+RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte, RegExpFlags flags,
+ RegExpNode* node);
class FrequencyCollator {
public:
@@ -475,7 +452,7 @@ class FrequencyCollator {
class RegExpCompiler {
public:
RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- JSRegExp::Flags flags, bool is_one_byte);
+ RegExpFlags flags, bool is_one_byte);
int AllocateRegister() {
if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
@@ -527,7 +504,7 @@ class RegExpCompiler {
// - Inserting the implicit .* before/after the regexp if necessary.
// - If the input is a one-byte string, filtering out nodes that can't match.
// - Fixing up regexp matches that start within a surrogate pair.
- RegExpNode* PreprocessRegExp(RegExpCompileData* data, JSRegExp::Flags flags,
+ RegExpNode* PreprocessRegExp(RegExpCompileData* data, RegExpFlags flags,
bool is_one_byte);
// If the regexp matching starts within a surrogate pair, step back to the
@@ -553,7 +530,7 @@ class RegExpCompiler {
inline void IncrementRecursionDepth() { recursion_depth_++; }
inline void DecrementRecursionDepth() { recursion_depth_--; }
- JSRegExp::Flags flags() const { return flags_; }
+ RegExpFlags flags() const { return flags_; }
void SetRegExpTooBig() { reg_exp_too_big_ = true; }
@@ -585,7 +562,7 @@ class RegExpCompiler {
int unicode_lookaround_position_register_;
ZoneVector<RegExpNode*>* work_list_;
int recursion_depth_;
- const JSRegExp::Flags flags_;
+ const RegExpFlags flags_;
RegExpMacroAssembler* macro_assembler_;
bool one_byte_;
bool reg_exp_too_big_;
diff --git a/deps/v8/src/regexp/regexp-error.h b/deps/v8/src/regexp/regexp-error.h
index 628f93638e..6485e74bb6 100644
--- a/deps/v8/src/regexp/regexp-error.h
+++ b/deps/v8/src/regexp/regexp-error.h
@@ -53,6 +53,11 @@ enum class RegExpError : uint32_t {
V8_EXPORT_PRIVATE const char* RegExpErrorString(RegExpError error);
+inline constexpr bool RegExpErrorIsStackOverflow(RegExpError error) {
+ return error == RegExpError::kStackOverflow ||
+ error == RegExpError::kAnalysisStackOverflow;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-flags.h b/deps/v8/src/regexp/regexp-flags.h
new file mode 100644
index 0000000000..b35cd7892b
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-flags.h
@@ -0,0 +1,71 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_FLAGS_H_
+#define V8_REGEXP_REGEXP_FLAGS_H_
+
+#include "src/base/flags.h"
+#include "src/base/optional.h"
+
+namespace v8 {
+namespace internal {
+
+// TODO(jgruber,pthier): Decouple more parts of the codebase from
+// JSRegExp::Flags. Consider removing JSRegExp::Flags.
+
+// Order is important! Sorted in alphabetic order by the flag char. Note this
+// means that flag bits are shuffled. Take care to keep them contiguous when
+// adding/removing flags.
+#define REGEXP_FLAG_LIST(V) \
+ V(has_indices, HasIndices, hasIndices, 'd', 7) \
+ V(global, Global, global, 'g', 0) \
+ V(ignore_case, IgnoreCase, ignoreCase, 'i', 1) \
+ V(linear, Linear, linear, 'l', 6) \
+ V(multiline, Multiline, multiline, 'm', 2) \
+ V(dot_all, DotAll, dotAll, 's', 5) \
+ V(unicode, Unicode, unicode, 'u', 4) \
+ V(sticky, Sticky, sticky, 'y', 3)
+
+#define V(Lower, Camel, LowerCamel, Char, Bit) k##Camel = 1 << Bit,
+enum class RegExpFlag { REGEXP_FLAG_LIST(V) };
+#undef V
+
+#define V(...) +1
+constexpr int kRegExpFlagCount = REGEXP_FLAG_LIST(V);
+#undef V
+
+// Assert alpha-sorted chars.
+#define V(Lower, Camel, LowerCamel, Char, Bit) < Char) && (Char
+static_assert((('a' - 1) REGEXP_FLAG_LIST(V) <= 'z'), "alpha-sort chars");
+#undef V
+
+// Assert contiguous indices.
+#define V(Lower, Camel, LowerCamel, Char, Bit) | (1 << Bit)
+static_assert(((1 << kRegExpFlagCount) - 1) == (0 REGEXP_FLAG_LIST(V)),
+ "contiguous bits");
+#undef V
+
+using RegExpFlags = base::Flags<RegExpFlag>;
+DEFINE_OPERATORS_FOR_FLAGS(RegExpFlags)
+
+#define V(Lower, Camel, ...) \
+ constexpr bool Is##Camel(RegExpFlags f) { \
+ return (f & RegExpFlag::k##Camel) != 0; \
+ }
+REGEXP_FLAG_LIST(V)
+#undef V
+
+// clang-format off
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ c == Char ? RegExpFlag::k##Camel :
+constexpr base::Optional<RegExpFlag> TryRegExpFlagFromChar(char c) {
+ return REGEXP_FLAG_LIST(V) base::Optional<RegExpFlag>{};
+}
+#undef V
+// clang-format on
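+// For illustration, the X-macro list above expands to definitions such as
+//   enum class RegExpFlag { ..., kIgnoreCase = 1 << 1, ... };
+//   constexpr bool IsIgnoreCase(RegExpFlags f) {
+//     return (f & RegExpFlag::kIgnoreCase) != 0;
+//   }
+// so that, e.g., TryRegExpFlagFromChar('i') yields RegExpFlag::kIgnoreCase,
+// while an unrecognized character yields an empty base::Optional.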
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_FLAGS_H_
diff --git a/deps/v8/src/regexp/regexp-interpreter.cc b/deps/v8/src/regexp/regexp-interpreter.cc
index 02fc334920..f9a959d258 100644
--- a/deps/v8/src/regexp/regexp-interpreter.cc
+++ b/deps/v8/src/regexp/regexp-interpreter.cc
@@ -6,17 +6,18 @@
#include "src/regexp/regexp-interpreter.h"
-#include "src/ast/ast.h"
#include "src/base/small-vector.h"
#include "src/base/strings.h"
+#include "src/execution/isolate.h"
#include "src/logging/counters.h"
#include "src/objects/js-regexp-inl.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/string-inl.h"
#include "src/regexp/regexp-bytecodes.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h" // For kMaximumStackSize.
#include "src/regexp/regexp.h"
#include "src/strings/unicode.h"
+#include "src/utils/memcopy.h"
#include "src/utils/utils.h"
#ifdef V8_INTL_SUPPORT
diff --git a/deps/v8/src/regexp/regexp-interpreter.h b/deps/v8/src/regexp/regexp-interpreter.h
index 9b4a8c6c30..a4d79184b0 100644
--- a/deps/v8/src/regexp/regexp-interpreter.h
+++ b/deps/v8/src/regexp/regexp-interpreter.h
@@ -12,6 +12,8 @@
namespace v8 {
namespace internal {
+class ByteArray;
+
class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic {
public:
enum Result {
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-arch.h b/deps/v8/src/regexp/regexp-macro-assembler-arch.h
index 5d5e3e6a44..5d4663e397 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-arch.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-arch.h
@@ -21,6 +21,8 @@
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/regexp/loong64/regexp-macro-assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/regexp/s390/regexp-macro-assembler-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
index af148eb47a..ca6abb4e48 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -4,8 +4,8 @@
#include "src/regexp/regexp-macro-assembler-tracer.h"
-#include "src/ast/ast.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/string.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 5457398f39..891079b357 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -5,6 +5,7 @@
#include "src/regexp/regexp-macro-assembler.h"
#include "src/codegen/assembler.h"
+#include "src/codegen/label.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/pointer-authentication.h"
#include "src/execution/simulator.h"
@@ -22,12 +23,17 @@ namespace internal {
RegExpMacroAssembler::RegExpMacroAssembler(Isolate* isolate, Zone* zone)
: slow_safe_compiler_(false),
+ backtrack_limit_(JSRegExp::kNoBacktrackLimit),
global_mode_(NOT_GLOBAL),
isolate_(isolate),
zone_(zone) {}
RegExpMacroAssembler::~RegExpMacroAssembler() = default;
+bool RegExpMacroAssembler::has_backtrack_limit() const {
+ return backtrack_limit_ != JSRegExp::kNoBacktrackLimit;
+}
+
int RegExpMacroAssembler::CaseInsensitiveCompareNonUnicode(Address byte_offset1,
Address byte_offset2,
size_t byte_length,
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index 31e8b1a370..9bd9ba615e 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -6,13 +6,15 @@
#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
#include "src/base/strings.h"
-#include "src/codegen/label.h"
#include "src/regexp/regexp-ast.h"
#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
+class ByteArray;
+class Label;
+
static const base::uc32 kLeadSurrogateStart = 0xd800;
static const base::uc32 kLeadSurrogateEnd = 0xdbff;
static const base::uc32 kTrailSurrogateStart = 0xdc00;
@@ -45,6 +47,7 @@ class RegExpMacroAssembler {
V(ARM) \
V(ARM64) \
V(MIPS) \
+ V(LOONG64) \
V(RISCV) \
V(S390) \
V(PPC) \
@@ -230,20 +233,18 @@ class RegExpMacroAssembler {
Zone* zone() const { return zone_; }
protected:
- bool has_backtrack_limit() const {
- return backtrack_limit_ != JSRegExp::kNoBacktrackLimit;
- }
+ bool has_backtrack_limit() const;
uint32_t backtrack_limit() const { return backtrack_limit_; }
bool can_fallback() const { return can_fallback_; }
private:
bool slow_safe_compiler_;
- uint32_t backtrack_limit_ = JSRegExp::kNoBacktrackLimit;
+ uint32_t backtrack_limit_;
bool can_fallback_ = false;
GlobalMode global_mode_;
- Isolate* isolate_;
- Zone* zone_;
+ Isolate* const isolate_;
+ Zone* const zone_;
};
class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
diff --git a/deps/v8/src/regexp/regexp-nodes.h b/deps/v8/src/regexp/regexp-nodes.h
index 537cf96201..46b6f5ce21 100644
--- a/deps/v8/src/regexp/regexp-nodes.h
+++ b/deps/v8/src/regexp/regexp-nodes.h
@@ -5,6 +5,7 @@
#ifndef V8_REGEXP_REGEXP_NODES_H_
#define V8_REGEXP_REGEXP_NODES_H_
+#include "src/codegen/label.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/zone/zone.h"
@@ -14,7 +15,6 @@ namespace internal {
class AlternativeGenerationList;
class BoyerMooreLookahead;
class GreedyLoopState;
-class Label;
class NodeVisitor;
class QuickCheckDetails;
class RegExpCompiler;
@@ -205,7 +205,7 @@ class RegExpNode : public ZoneObject {
// If we know that the input is one-byte then there are some nodes that can
// never match. This method returns a node that can be substituted for
// itself, or nullptr if the node can never match.
- virtual RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) {
+ virtual RegExpNode* FilterOneByte(int depth, RegExpFlags flags) {
return this;
}
// Helper for FilterOneByte.
@@ -296,7 +296,7 @@ class SeqRegExpNode : public RegExpNode {
: RegExpNode(on_success->zone()), on_success_(on_success) {}
RegExpNode* on_success() { return on_success_; }
void set_on_success(RegExpNode* node) { on_success_ = node; }
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
void FillInBMInfo(Isolate* isolate, int offset, int budget,
BoyerMooreLookahead* bm, bool not_at_start) override {
on_success_->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
@@ -304,7 +304,7 @@ class SeqRegExpNode : public RegExpNode {
}
protected:
- RegExpNode* FilterSuccessor(int depth, JSRegExp::Flags flags);
+ RegExpNode* FilterSuccessor(int depth, RegExpFlags flags);
private:
RegExpNode* on_success_;
@@ -423,14 +423,14 @@ class TextNode : public SeqRegExpNode {
ZoneList<TextElement>* elements() { return elms_; }
bool read_backward() { return read_backward_; }
void MakeCaseIndependent(Isolate* isolate, bool is_one_byte,
- JSRegExp::Flags flags);
+ RegExpFlags flags);
int GreedyLoopTextLength() override;
RegExpNode* GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler) override;
void FillInBMInfo(Isolate* isolate, int offset, int budget,
BoyerMooreLookahead* bm, bool not_at_start) override;
void CalculateOffsets();
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
int Length();
private:
@@ -498,7 +498,7 @@ class AssertionNode : public SeqRegExpNode {
class BackReferenceNode : public SeqRegExpNode {
public:
- BackReferenceNode(int start_reg, int end_reg, JSRegExp::Flags flags,
+ BackReferenceNode(int start_reg, int end_reg, RegExpFlags flags,
bool read_backward, RegExpNode* on_success)
: SeqRegExpNode(on_success),
start_reg_(start_reg),
@@ -521,7 +521,7 @@ class BackReferenceNode : public SeqRegExpNode {
private:
int start_reg_;
int end_reg_;
- JSRegExp::Flags flags_;
+ RegExpFlags flags_;
bool read_backward_;
};
@@ -623,7 +623,7 @@ class ChoiceNode : public RegExpNode {
virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
return true;
}
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
virtual bool read_backward() { return false; }
protected:
@@ -695,7 +695,7 @@ class NegativeLookaroundChoiceNode : public ChoiceNode {
return !is_first;
}
void Accept(NodeVisitor* visitor) override;
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
};
class LoopChoiceNode : public ChoiceNode {
@@ -728,7 +728,7 @@ class LoopChoiceNode : public ChoiceNode {
int min_loop_iterations() const { return min_loop_iterations_; }
bool read_backward() override { return read_backward_; }
void Accept(NodeVisitor* visitor) override;
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
private:
// AddAlternative is made private for loop nodes because alternatives
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 1201e555ad..1d9f24b792 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -4,12 +4,9 @@
#include "src/regexp/regexp-parser.h"
-#include <vector>
-
#include "src/execution/isolate.h"
-#include "src/heap/factory.h"
-#include "src/objects/objects-inl.h"
#include "src/regexp/property-sequences.h"
+#include "src/regexp/regexp-ast.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp.h"
#include "src/strings/char-predicates-inl.h"
@@ -24,14 +21,386 @@
namespace v8 {
namespace internal {
-RegExpParser::RegExpParser(FlatStringReader* in, JSRegExp::Flags flags,
- Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- zone_(zone),
+namespace {
+
+// A BufferedZoneList is an automatically growing list, just like (and backed
+// by) a ZoneList, that is optimized for the case of adding and removing
+// a single element. The last element added is stored outside the backing list,
+// and if no more than one element is ever added, the ZoneList isn't even
+// allocated.
+// Elements must not be nullptr pointers.
+template <typename T, int initial_size>
+class BufferedZoneList {
+ public:
+ BufferedZoneList() : list_(nullptr), last_(nullptr) {}
+
+  // Adds an element at the end of the list. The element is buffered and can
+  // be read using last() or removed using RemoveLast() until the next call to
+  // Add(), RemoveLast(), or GetList().
+ void Add(T* value, Zone* zone) {
+ if (last_ != nullptr) {
+ if (list_ == nullptr) {
+ list_ = zone->New<ZoneList<T*>>(initial_size, zone);
+ }
+ list_->Add(last_, zone);
+ }
+ last_ = value;
+ }
+
+ T* last() {
+ DCHECK(last_ != nullptr);
+ return last_;
+ }
+
+ T* RemoveLast() {
+ DCHECK(last_ != nullptr);
+ T* result = last_;
+ if ((list_ != nullptr) && (list_->length() > 0))
+ last_ = list_->RemoveLast();
+ else
+ last_ = nullptr;
+ return result;
+ }
+
+ T* Get(int i) {
+ DCHECK((0 <= i) && (i < length()));
+ if (list_ == nullptr) {
+ DCHECK_EQ(0, i);
+ return last_;
+ } else {
+ if (i == list_->length()) {
+ DCHECK(last_ != nullptr);
+ return last_;
+ } else {
+ return list_->at(i);
+ }
+ }
+ }
+
+ void Clear() {
+ list_ = nullptr;
+ last_ = nullptr;
+ }
+
+ int length() {
+ int length = (list_ == nullptr) ? 0 : list_->length();
+ return length + ((last_ == nullptr) ? 0 : 1);
+ }
+
+ ZoneList<T*>* GetList(Zone* zone) {
+ if (list_ == nullptr) {
+ list_ = zone->New<ZoneList<T*>>(initial_size, zone);
+ }
+ if (last_ != nullptr) {
+ list_->Add(last_, zone);
+ last_ = nullptr;
+ }
+ return list_;
+ }
+
+ private:
+ ZoneList<T*>* list_;
+ T* last_;
+};
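
BufferedZoneList keeps the most recently added element outside the backing ZoneList, so the common pattern of adding and then immediately removing a single term never allocates the list at all. A minimal standalone analogue of that buffering idea, using std::vector and std::optional instead of V8's zone allocator (the class and names below are illustrative, not V8 API):

```cpp
#include <cassert>
#include <optional>
#include <vector>

// Illustrative analogue of BufferedZoneList: the last element lives in
// `last_`; the vector is only touched once a second element is added.
template <typename T>
class BufferedList {
 public:
  void Add(T value) {
    if (last_) list_.push_back(*last_);  // spill the buffered element
    last_ = value;
  }
  T RemoveLast() {
    assert(last_);
    T result = *last_;
    if (!list_.empty()) {
      last_ = list_.back();
      list_.pop_back();
    } else {
      last_.reset();
    }
    return result;
  }
  int length() const {
    return static_cast<int>(list_.size()) + (last_ ? 1 : 0);
  }

 private:
  std::vector<T> list_;  // the real class also exposes Get()/GetList()
  std::optional<T> last_;
};

int main() {
  BufferedList<int> terms;
  terms.Add(1);  // buffered only; the vector stays empty
  assert(terms.length() == 1);
  terms.Add(2);  // 1 spills into the vector, 2 becomes the buffered element
  assert(terms.RemoveLast() == 2);
  assert(terms.length() == 1);
}
```
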
+
+// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
+class RegExpBuilder : public ZoneObject {
+ public:
+ RegExpBuilder(Zone* zone, RegExpFlags flags);
+ void AddCharacter(base::uc16 character);
+ void AddUnicodeCharacter(base::uc32 character);
+ void AddEscapedUnicodeCharacter(base::uc32 character);
+  // "Adds" an empty expression. Does nothing except consume a
+  // following quantifier.
+ void AddEmpty();
+ void AddCharacterClass(RegExpCharacterClass* cc);
+ void AddCharacterClassForDesugaring(base::uc32 c);
+ void AddAtom(RegExpTree* tree);
+ void AddTerm(RegExpTree* tree);
+ void AddAssertion(RegExpTree* tree);
+ void NewAlternative(); // '|'
+ bool AddQuantifierToAtom(int min, int max,
+ RegExpQuantifier::QuantifierType type);
+ void FlushText();
+ RegExpTree* ToRegExp();
+ RegExpFlags flags() const { return flags_; }
+
+ bool ignore_case() const { return IsIgnoreCase(flags_); }
+ bool multiline() const { return IsMultiline(flags_); }
+ bool dotall() const { return IsDotAll(flags_); }
+
+ private:
+ static const base::uc16 kNoPendingSurrogate = 0;
+ void AddLeadSurrogate(base::uc16 lead_surrogate);
+ void AddTrailSurrogate(base::uc16 trail_surrogate);
+ void FlushPendingSurrogate();
+ void FlushCharacters();
+ void FlushTerms();
+ bool NeedsDesugaringForUnicode(RegExpCharacterClass* cc);
+ bool NeedsDesugaringForIgnoreCase(base::uc32 c);
+ Zone* zone() const { return zone_; }
+ bool unicode() const { return IsUnicode(flags_); }
+
+ Zone* const zone_;
+ bool pending_empty_;
+ const RegExpFlags flags_;
+ ZoneList<base::uc16>* characters_;
+ base::uc16 pending_surrogate_;
+ BufferedZoneList<RegExpTree, 2> terms_;
+ BufferedZoneList<RegExpTree, 2> text_;
+ BufferedZoneList<RegExpTree, 2> alternatives_;
+#ifdef DEBUG
+  enum { ADD_NONE, ADD_CHAR, ADD_TERM, ADD_ASSERT, ADD_ATOM } last_added_;
+#define LAST(x) last_added_ = x;
+#else
+#define LAST(x)
+#endif
+};
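
The builder now queries flags through predicates such as IsIgnoreCase and IsUnicode instead of masking JSRegExp constants; the real definitions live in src/regexp/regexp-flags.h. The sketch below is only a hypothetical stand-in (the Demo-prefixed names are invented) showing the shape of such a typed flag set and its predicates:

```cpp
#include <cstdint>

// Hypothetical stand-in for a RegExpFlags-style bit set; the actual flag
// values and helpers are defined in src/regexp/regexp-flags.h.
enum class DemoRegExpFlag : uint8_t {
  kIgnoreCase = 1 << 0,
  kMultiline = 1 << 1,
  kDotAll = 1 << 2,
  kUnicode = 1 << 3,
};
using DemoRegExpFlags = uint8_t;

constexpr DemoRegExpFlags operator|(DemoRegExpFlag a, DemoRegExpFlag b) {
  return static_cast<uint8_t>(a) | static_cast<uint8_t>(b);
}
constexpr bool IsIgnoreCase(DemoRegExpFlags f) {
  return (f & static_cast<uint8_t>(DemoRegExpFlag::kIgnoreCase)) != 0;
}
constexpr bool IsUnicode(DemoRegExpFlags f) {
  return (f & static_cast<uint8_t>(DemoRegExpFlag::kUnicode)) != 0;
}

int main() {
  DemoRegExpFlags flags = DemoRegExpFlag::kUnicode | DemoRegExpFlag::kDotAll;
  return IsUnicode(flags) && !IsIgnoreCase(flags) ? 0 : 1;
}
```
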
+
+enum SubexpressionType {
+ INITIAL,
+ CAPTURE, // All positive values represent captures.
+ POSITIVE_LOOKAROUND,
+ NEGATIVE_LOOKAROUND,
+ GROUPING
+};
+
+class RegExpParserState : public ZoneObject {
+ public:
+ // Push a state on the stack.
+ RegExpParserState(RegExpParserState* previous_state,
+ SubexpressionType group_type,
+ RegExpLookaround::Type lookaround_type,
+ int disjunction_capture_index,
+ const ZoneVector<base::uc16>* capture_name,
+ RegExpFlags flags, Zone* zone)
+ : previous_state_(previous_state),
+ builder_(zone->New<RegExpBuilder>(zone, flags)),
+ group_type_(group_type),
+ lookaround_type_(lookaround_type),
+ disjunction_capture_index_(disjunction_capture_index),
+ capture_name_(capture_name) {}
+ // Parser state of containing expression, if any.
+ RegExpParserState* previous_state() const { return previous_state_; }
+ bool IsSubexpression() { return previous_state_ != nullptr; }
+ // RegExpBuilder building this regexp's AST.
+ RegExpBuilder* builder() const { return builder_; }
+ // Type of regexp being parsed (parenthesized group or entire regexp).
+ SubexpressionType group_type() const { return group_type_; }
+ // Lookahead or Lookbehind.
+ RegExpLookaround::Type lookaround_type() const { return lookaround_type_; }
+ // Index in captures array of first capture in this sub-expression, if any.
+ // Also the capture index of this sub-expression itself, if group_type
+ // is CAPTURE.
+ int capture_index() const { return disjunction_capture_index_; }
+ // The name of the current sub-expression, if group_type is CAPTURE. Only
+ // used for named captures.
+ const ZoneVector<base::uc16>* capture_name() const { return capture_name_; }
+
+ bool IsNamedCapture() const { return capture_name_ != nullptr; }
+
+ // Check whether the parser is inside a capture group with the given index.
+ bool IsInsideCaptureGroup(int index) const {
+ for (const RegExpParserState* s = this; s != nullptr;
+ s = s->previous_state()) {
+ if (s->group_type() != CAPTURE) continue;
+ // Return true if we found the matching capture index.
+ if (index == s->capture_index()) return true;
+ // Abort if index is larger than what has been parsed up till this state.
+ if (index > s->capture_index()) return false;
+ }
+ return false;
+ }
+
+ // Check whether the parser is inside a capture group with the given name.
+ bool IsInsideCaptureGroup(const ZoneVector<base::uc16>* name) const {
+ DCHECK_NOT_NULL(name);
+ for (const RegExpParserState* s = this; s != nullptr;
+ s = s->previous_state()) {
+ if (s->capture_name() == nullptr) continue;
+ if (*s->capture_name() == *name) return true;
+ }
+ return false;
+ }
+
+ private:
+ // Linked list implementation of stack of states.
+ RegExpParserState* const previous_state_;
+ // Builder for the stored disjunction.
+ RegExpBuilder* const builder_;
+ // Stored disjunction type (capture, look-ahead or grouping), if any.
+ const SubexpressionType group_type_;
+ // Stored read direction.
+ const RegExpLookaround::Type lookaround_type_;
+ // Stored disjunction's capture index (if any).
+ const int disjunction_capture_index_;
+ // Stored capture name (if any).
+ const ZoneVector<base::uc16>* const capture_name_;
+};
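
RegExpParserState forms a linked stack: each open group pushes a state pointing at its enclosing state, and IsInsideCaptureGroup simply walks that chain, relying on enclosing captures having smaller indices than nested ones. A compact standalone sketch of the same walk with simplified types (not the V8 classes):

```cpp
#include <cassert>

// Simplified stand-in for the linked stack of parser states: each open
// capture group pushes a node that remembers its capture index.
struct DemoParserState {
  const DemoParserState* previous;
  bool is_capture;
  int capture_index;  // meaningful only when is_capture is true

  bool IsInsideCaptureGroup(int index) const {
    for (const DemoParserState* s = this; s != nullptr; s = s->previous) {
      if (!s->is_capture) continue;
      if (index == s->capture_index) return true;   // found the group
      if (index > s->capture_index) return false;   // not opened yet
    }
    return false;
  }
};

int main() {
  // Models the position inside "(a(?:b(c": capture 1, a grouping, capture 2.
  DemoParserState outer{nullptr, true, 1};
  DemoParserState grouping{&outer, false, 0};
  DemoParserState inner{&grouping, true, 2};
  assert(inner.IsInsideCaptureGroup(1));
  assert(inner.IsInsideCaptureGroup(2));
  assert(!inner.IsInsideCaptureGroup(3));
}
```
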
+
+template <class CharT>
+class RegExpParserImpl final {
+ private:
+ RegExpParserImpl(const CharT* input, int input_length, RegExpFlags flags,
+ uintptr_t stack_limit, Zone* zone,
+ const DisallowGarbageCollection& no_gc);
+
+ bool Parse(RegExpCompileData* result);
+
+ RegExpTree* ParsePattern();
+ RegExpTree* ParseDisjunction();
+ RegExpTree* ParseGroup();
+
+ // Parses a {...,...} quantifier and stores the range in the given
+ // out parameters.
+ bool ParseIntervalQuantifier(int* min_out, int* max_out);
+
+ // Parses and returns a single escaped character. The character
+  // must not be 'b' or 'B' since they are usually handled specially.
+ base::uc32 ParseClassCharacterEscape();
+
+  // Checks whether what follows is a hexadecimal number with `length`
+  // digits, and sets the value if it is.
+ bool ParseHexEscape(int length, base::uc32* value);
+ bool ParseUnicodeEscape(base::uc32* value);
+ bool ParseUnlimitedLengthHexNumber(int max_value, base::uc32* value);
+
+ bool ParsePropertyClassName(ZoneVector<char>* name_1,
+ ZoneVector<char>* name_2);
+ bool AddPropertyClassRange(ZoneList<CharacterRange>* add_to, bool negate,
+ const ZoneVector<char>& name_1,
+ const ZoneVector<char>& name_2);
+
+ RegExpTree* GetPropertySequence(const ZoneVector<char>& name_1);
+ RegExpTree* ParseCharacterClass(const RegExpBuilder* state);
+
+ base::uc32 ParseOctalLiteral();
+
+ // Tries to parse the input as a back reference. If successful it
+ // stores the result in the output parameter and returns true. If
+ // it fails it will push back the characters read so the same characters
+ // can be reparsed.
+ bool ParseBackReferenceIndex(int* index_out);
+
+  // Parse inside a class. Either add the escaped class to |ranges|, or set
+  // *is_class_escape to false and pass the parsed single character through
+  // |char_out|.
+ void ParseClassEscape(ZoneList<CharacterRange>* ranges, Zone* zone,
+ bool add_unicode_case_equivalents, base::uc32* char_out,
+ bool* is_class_escape);
+
+ char ParseClassEscape();
+
+ RegExpTree* ReportError(RegExpError error);
+ void Advance();
+ void Advance(int dist);
+ void Reset(int pos);
+
+ // Reports whether the pattern might be used as a literal search string.
+ // Only use if the result of the parse is a single atom node.
+ bool simple();
+ bool contains_anchor() { return contains_anchor_; }
+ void set_contains_anchor() { contains_anchor_ = true; }
+ int captures_started() { return captures_started_; }
+ int position() { return next_pos_ - 1; }
+ bool failed() { return failed_; }
+ bool unicode() const { return IsUnicode(top_level_flags_); }
+
+ static bool IsSyntaxCharacterOrSlash(base::uc32 c);
+
+ static const base::uc32 kEndMarker = (1 << 21);
+
+ private:
+ // Return the 1-indexed RegExpCapture object, allocate if necessary.
+ RegExpCapture* GetCapture(int index);
+
+ // Creates a new named capture at the specified index. Must be called exactly
+ // once for each named capture. Fails if a capture with the same name is
+ // encountered.
+ bool CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name, int index);
+
+ // Parses the name of a capture group (?<name>pattern). The name must adhere
+ // to IdentifierName in the ECMAScript standard.
+ const ZoneVector<base::uc16>* ParseCaptureGroupName();
+
+ bool ParseNamedBackReference(RegExpBuilder* builder,
+ RegExpParserState* state);
+ RegExpParserState* ParseOpenParenthesis(RegExpParserState* state);
+
+ // After the initial parsing pass, patch corresponding RegExpCapture objects
+ // into all RegExpBackReferences. This is done after initial parsing in order
+  // to avoid complicating cases in which references come before the capture.
+ void PatchNamedBackReferences();
+
+ ZoneVector<RegExpCapture*>* GetNamedCaptures() const;
+
+ // Returns true iff the pattern contains named captures. May call
+ // ScanForCaptures to look ahead at the remaining pattern.
+ bool HasNamedCaptures();
+
+ Zone* zone() const { return zone_; }
+
+ base::uc32 current() { return current_; }
+ bool has_more() { return has_more_; }
+ bool has_next() { return next_pos_ < input_length(); }
+ base::uc32 Next();
+ template <bool update_position>
+ base::uc32 ReadNext();
+ CharT InputAt(int index) const {
+ DCHECK(0 <= index && index < input_length());
+ return input_[index];
+ }
+ int input_length() const { return input_length_; }
+ void ScanForCaptures();
+
+ struct RegExpCaptureNameLess {
+ bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
+ DCHECK_NOT_NULL(lhs);
+ DCHECK_NOT_NULL(rhs);
+ return *lhs->name() < *rhs->name();
+ }
+ };
+
+ const DisallowGarbageCollection no_gc_;
+ Zone* const zone_;
+ RegExpError error_ = RegExpError::kNone;
+ int error_pos_ = 0;
+ ZoneList<RegExpCapture*>* captures_;
+ ZoneSet<RegExpCapture*, RegExpCaptureNameLess>* named_captures_;
+ ZoneList<RegExpBackReference*>* named_back_references_;
+ const CharT* const input_;
+ const int input_length_;
+ base::uc32 current_;
+ const RegExpFlags top_level_flags_;
+ int next_pos_;
+ int captures_started_;
+ int capture_count_; // Only valid after we have scanned for captures.
+ bool has_more_;
+ bool simple_;
+ bool contains_anchor_;
+ bool is_scanned_for_captures_;
+ bool has_named_captures_; // Only valid after we have scanned for captures.
+ bool failed_;
+ const uintptr_t stack_limit_;
+
+ friend bool RegExpParser::ParseRegExpFromHeapString(Isolate*, Zone*,
+ Handle<String>,
+ RegExpFlags,
+ RegExpCompileData*);
+ friend bool RegExpParser::VerifyRegExpSyntax<CharT>(
+ Zone*, uintptr_t, const CharT*, int, RegExpFlags, RegExpCompileData*,
+ const DisallowGarbageCollection&);
+};
+
+template <class CharT>
+RegExpParserImpl<CharT>::RegExpParserImpl(
+ const CharT* input, int input_length, RegExpFlags flags,
+ uintptr_t stack_limit, Zone* zone, const DisallowGarbageCollection& no_gc)
+ : zone_(zone),
captures_(nullptr),
named_captures_(nullptr),
named_back_references_(nullptr),
- in_(in),
+ input_(input),
+ input_length_(input_length),
current_(kEndMarker),
top_level_flags_(flags),
next_pos_(0),
@@ -42,30 +411,44 @@ RegExpParser::RegExpParser(FlatStringReader* in, JSRegExp::Flags flags,
contains_anchor_(false),
is_scanned_for_captures_(false),
has_named_captures_(false),
- failed_(false) {
+ failed_(false),
+ stack_limit_(stack_limit) {
Advance();
}
+template <>
template <bool update_position>
-inline base::uc32 RegExpParser::ReadNext() {
+inline base::uc32 RegExpParserImpl<uint8_t>::ReadNext() {
int position = next_pos_;
- base::uc32 c0 = in()->Get(position);
+ base::uc16 c0 = InputAt(position);
+ position++;
+ DCHECK(!unibrow::Utf16::IsLeadSurrogate(c0));
+ if (update_position) next_pos_ = position;
+ return c0;
+}
+
+template <>
+template <bool update_position>
+inline base::uc32 RegExpParserImpl<base::uc16>::ReadNext() {
+ int position = next_pos_;
+ base::uc16 c0 = InputAt(position);
+ base::uc32 result = c0;
position++;
// Read the whole surrogate pair in case of unicode flag, if possible.
- if (unicode() && position < in()->length() &&
- unibrow::Utf16::IsLeadSurrogate(static_cast<base::uc16>(c0))) {
- base::uc16 c1 = in()->Get(position);
+ if (unicode() && position < input_length() &&
+ unibrow::Utf16::IsLeadSurrogate(c0)) {
+ base::uc16 c1 = InputAt(position);
if (unibrow::Utf16::IsTrailSurrogate(c1)) {
- c0 =
- unibrow::Utf16::CombineSurrogatePair(static_cast<base::uc16>(c0), c1);
+ result = unibrow::Utf16::CombineSurrogatePair(c0, c1);
position++;
}
}
if (update_position) next_pos_ = position;
- return c0;
+ return result;
}
-base::uc32 RegExpParser::Next() {
+template <class CharT>
+base::uc32 RegExpParserImpl<CharT>::Next() {
if (has_next()) {
return ReadNext<false>();
} else {
@@ -73,10 +456,10 @@ base::uc32 RegExpParser::Next() {
}
}
-void RegExpParser::Advance() {
+template <class CharT>
+void RegExpParserImpl<CharT>::Advance() {
if (has_next()) {
- StackLimitCheck check(isolate());
- if (check.HasOverflowed()) {
+ if (GetCurrentStackPosition() < stack_limit_) {
if (FLAG_correctness_fuzzer_suppressions) {
FATAL("Aborting on stack overflow");
}
@@ -93,27 +476,31 @@ void RegExpParser::Advance() {
current_ = kEndMarker;
// Advance so that position() points to 1-after-the-last-character. This is
// important so that Reset() to this position works correctly.
- next_pos_ = in()->length() + 1;
+ next_pos_ = input_length() + 1;
has_more_ = false;
}
}
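
Advance no longer needs an Isolate for its stack check: it compares the current stack position against a limit captured when the parser was constructed, which is part of what lets the parser run without touching the heap. Below is a minimal sketch of that style of guard for a recursive-descent parser; the address-of-a-local probe and the 256 KiB margin are illustrative assumptions, not V8's real_climit:

```cpp
#include <cstdint>
#include <cstdio>

// Approximates the current stack position by taking the address of a local
// (assumes a downward-growing stack). The margin below is an arbitrary
// illustrative value, not V8's configured stack limit.
uintptr_t GetCurrentStackPosition() {
  char marker = 0;
  return reinterpret_cast<uintptr_t>(&marker);
}

bool ParseNested(int depth, uintptr_t stack_limit) {
  if (GetCurrentStackPosition() < stack_limit) {
    return false;  // the real parser reports a stack-overflow error here
  }
  if (depth == 0) return true;
  return ParseNested(depth - 1, stack_limit);  // recurse like ParseDisjunction
}

int main() {
  // Allow roughly 256 KiB of stack below the current frame (illustrative).
  uintptr_t limit = GetCurrentStackPosition() - 256 * 1024;
  bool ok = ParseNested(1000000, limit);
  std::printf("parse %s\n", ok ? "completed" : "bailed out at the stack limit");
}
```
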
-
-void RegExpParser::Reset(int pos) {
+template <class CharT>
+void RegExpParserImpl<CharT>::Reset(int pos) {
next_pos_ = pos;
- has_more_ = (pos < in()->length());
+ has_more_ = (pos < input_length());
Advance();
}
-void RegExpParser::Advance(int dist) {
+template <class CharT>
+void RegExpParserImpl<CharT>::Advance(int dist) {
next_pos_ += dist - 1;
Advance();
}
+template <class CharT>
+bool RegExpParserImpl<CharT>::simple() {
+ return simple_;
+}
-bool RegExpParser::simple() { return simple_; }
-
-bool RegExpParser::IsSyntaxCharacterOrSlash(base::uc32 c) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::IsSyntaxCharacterOrSlash(base::uc32 c) {
switch (c) {
case '^':
case '$':
@@ -137,14 +524,15 @@ bool RegExpParser::IsSyntaxCharacterOrSlash(base::uc32 c) {
return false;
}
-RegExpTree* RegExpParser::ReportError(RegExpError error) {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::ReportError(RegExpError error) {
if (failed_) return nullptr; // Do not overwrite any existing error.
failed_ = true;
error_ = error;
error_pos_ = position();
// Zip to the end to make sure no more input is read.
current_ = kEndMarker;
- next_pos_ = in()->length();
+ next_pos_ = input_length();
return nullptr;
}
@@ -154,19 +542,19 @@ RegExpTree* RegExpParser::ReportError(RegExpError error) {
// Pattern ::
// Disjunction
-RegExpTree* RegExpParser::ParsePattern() {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::ParsePattern() {
RegExpTree* result = ParseDisjunction(CHECK_FAILED);
PatchNamedBackReferences(CHECK_FAILED);
DCHECK(!has_more());
// If the result of parsing is a literal string atom, and it has the
// same length as the input, then the atom is identical to the input.
- if (result->IsAtom() && result->AsAtom()->length() == in()->length()) {
+ if (result->IsAtom() && result->AsAtom()->length() == input_length()) {
simple_ = true;
}
return result;
}
-
// Disjunction ::
// Alternative
// Alternative | Disjunction
@@ -177,7 +565,8 @@ RegExpTree* RegExpParser::ParsePattern() {
// Assertion
// Atom
// Atom Quantifier
-RegExpTree* RegExpParser::ParseDisjunction() {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
// Used to store current state while parsing subexpressions.
RegExpParserState initial_state(nullptr, INITIAL, RegExpLookaround::LOOKAHEAD,
0, nullptr, top_level_flags_, zone());
@@ -220,12 +609,12 @@ RegExpTree* RegExpParser::ParseDisjunction() {
capture->set_body(body);
body = capture;
} else if (group_type == GROUPING) {
- body = zone()->New<RegExpGroup>(body);
+ body = zone()->template New<RegExpGroup>(body);
} else {
DCHECK(group_type == POSITIVE_LOOKAROUND ||
group_type == NEGATIVE_LOOKAROUND);
bool is_positive = (group_type == POSITIVE_LOOKAROUND);
- body = zone()->New<RegExpLookaround>(
+ body = zone()->template New<RegExpLookaround>(
body, is_positive, end_capture_index - capture_index,
capture_index, state->lookaround_type());
}
@@ -250,7 +639,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
return ReportError(RegExpError::kNothingToRepeat);
case '^': {
Advance();
- builder->AddAssertion(zone()->New<RegExpAssertion>(
+ builder->AddAssertion(zone()->template New<RegExpAssertion>(
builder->multiline() ? RegExpAssertion::START_OF_LINE
: RegExpAssertion::START_OF_INPUT));
set_contains_anchor();
@@ -261,13 +650,14 @@ RegExpTree* RegExpParser::ParseDisjunction() {
RegExpAssertion::AssertionType assertion_type =
builder->multiline() ? RegExpAssertion::END_OF_LINE
: RegExpAssertion::END_OF_INPUT;
- builder->AddAssertion(zone()->New<RegExpAssertion>(assertion_type));
+ builder->AddAssertion(
+ zone()->template New<RegExpAssertion>(assertion_type));
continue;
}
case '.': {
Advance();
ZoneList<CharacterRange>* ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
if (builder->dotall()) {
// Everything.
@@ -278,7 +668,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
RegExpCharacterClass* cc =
- zone()->New<RegExpCharacterClass>(zone(), ranges);
+ zone()->template New<RegExpCharacterClass>(zone(), ranges);
builder->AddCharacterClass(cc);
break;
}
@@ -300,13 +690,13 @@ RegExpTree* RegExpParser::ParseDisjunction() {
return ReportError(RegExpError::kEscapeAtEndOfPattern);
case 'b':
Advance(2);
- builder->AddAssertion(
- zone()->New<RegExpAssertion>(RegExpAssertion::BOUNDARY));
+ builder->AddAssertion(zone()->template New<RegExpAssertion>(
+ RegExpAssertion::BOUNDARY));
continue;
case 'B':
Advance(2);
- builder->AddAssertion(
- zone()->New<RegExpAssertion>(RegExpAssertion::NON_BOUNDARY));
+ builder->AddAssertion(zone()->template New<RegExpAssertion>(
+ RegExpAssertion::NON_BOUNDARY));
continue;
// AtomEscape ::
// CharacterClassEscape
@@ -322,11 +712,11 @@ RegExpTree* RegExpParser::ParseDisjunction() {
base::uc32 c = Next();
Advance(2);
ZoneList<CharacterRange>* ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
CharacterRange::AddClassEscape(
c, ranges, unicode() && builder->ignore_case(), zone());
RegExpCharacterClass* cc =
- zone()->New<RegExpCharacterClass>(zone(), ranges);
+ zone()->template New<RegExpCharacterClass>(zone(), ranges);
builder->AddCharacterClass(cc);
break;
}
@@ -336,13 +726,14 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance(2);
if (unicode()) {
ZoneList<CharacterRange>* ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
ZoneVector<char> name_1(zone());
ZoneVector<char> name_2(zone());
if (ParsePropertyClassName(&name_1, &name_2)) {
if (AddPropertyClassRange(ranges, p == 'P', name_1, name_2)) {
RegExpCharacterClass* cc =
- zone()->New<RegExpCharacterClass>(zone(), ranges);
+ zone()->template New<RegExpCharacterClass>(zone(),
+ ranges);
builder->AddCharacterClass(cc);
break;
}
@@ -381,8 +772,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
builder->AddEmpty();
} else {
RegExpCapture* capture = GetCapture(index);
- RegExpTree* atom =
- zone()->New<RegExpBackReference>(capture, builder->flags());
+ RegExpTree* atom = zone()->template New<RegExpBackReference>(
+ capture, builder->flags());
builder->AddAtom(atom);
}
break;
@@ -575,12 +966,11 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
}
-RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
+template <class CharT>
+RegExpParserState* RegExpParserImpl<CharT>::ParseOpenParenthesis(
RegExpParserState* state) {
RegExpLookaround::Type lookaround_type = state->lookaround_type();
bool is_named_capture = false;
- JSRegExp::Flags switch_on = JSRegExp::kNone;
- JSRegExp::Flags switch_off = JSRegExp::kNone;
const ZoneVector<base::uc16>* capture_name = nullptr;
SubexpressionType subexpr_type = CAPTURE;
Advance();
@@ -623,7 +1013,7 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
}
}
if (subexpr_type == CAPTURE) {
- if (captures_started_ >= JSRegExp::kMaxCaptures) {
+ if (captures_started_ >= RegExpMacroAssembler::kMaxRegisterCount) {
ReportError(RegExpError::kTooManyCaptures);
return nullptr;
}
@@ -633,11 +1023,10 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
capture_name = ParseCaptureGroupName(CHECK_FAILED);
}
}
- JSRegExp::Flags flags = (state->builder()->flags() | switch_on) & ~switch_off;
// Store current state and begin new disjunction parsing.
- return zone()->New<RegExpParserState>(state, subexpr_type, lookaround_type,
- captures_started_, capture_name, flags,
- zone());
+ return zone()->template New<RegExpParserState>(
+ state, subexpr_type, lookaround_type, captures_started_, capture_name,
+ state->builder()->flags(), zone());
}
#ifdef DEBUG
@@ -657,14 +1046,14 @@ static bool IsSpecialClassEscape(base::uc32 c) {
}
#endif
-
// In order to know whether an escape is a backreference or not we have to scan
// the entire regexp and find the number of capturing parentheses. However we
// don't want to scan the regexp twice unless it is necessary. This mini-parser
// is called when needed. It can see the difference between capturing and
// noncapturing parentheses and can skip character classes and backslash-escaped
// characters.
-void RegExpParser::ScanForCaptures() {
+template <class CharT>
+void RegExpParserImpl<CharT>::ScanForCaptures() {
DCHECK(!is_scanned_for_captures_);
const int saved_position = position();
// Start with captures started previous to current position
@@ -718,8 +1107,8 @@ void RegExpParser::ScanForCaptures() {
Reset(saved_position);
}
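
As the comment above explains, deciding whether \N is a backreference requires the total number of capturing parentheses, so the parser lazily runs this mini-scan, which skips escapes and character classes and counts '(' not followed by '?'. A simplified standalone sketch of that counting pass (it omits the named-capture detection the real scanner also performs):

```cpp
#include <cassert>
#include <string>

// Simplified capture-counting scan in the spirit of ScanForCaptures: skip
// escaped characters and character classes, and count '(' that do not start
// a '(?...' group.
int CountCaptures(const std::string& pattern) {
  int captures = 0;
  bool in_class = false;
  for (size_t i = 0; i < pattern.size(); ++i) {
    char c = pattern[i];
    if (c == '\\') {
      ++i;  // skip the escaped character
    } else if (in_class) {
      if (c == ']') in_class = false;
    } else if (c == '[') {
      in_class = true;
    } else if (c == '(') {
      if (i + 1 >= pattern.size() || pattern[i + 1] != '?') ++captures;
    }
  }
  return captures;
}

int main() {
  assert(CountCaptures("(a)(b(c))") == 3);
  assert(CountCaptures("(?:x)(y)") == 1);
  assert(CountCaptures("[(](z)\\(") == 1);
}
```
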
-
-bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseBackReferenceIndex(int* index_out) {
DCHECK_EQ('\\', current());
DCHECK('1' <= Next() && Next() <= '9');
// Try to parse a decimal literal that is no greater than the total number
@@ -731,7 +1120,7 @@ bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
base::uc32 c = current();
if (IsDecimalDigit(c)) {
value = 10 * value + (c - '0');
- if (value > JSRegExp::kMaxCaptures) {
+ if (value > RegExpMacroAssembler::kMaxRegisterCount) {
Reset(start);
return false;
}
@@ -751,7 +1140,9 @@ bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
return true;
}
-static void push_code_unit(ZoneVector<base::uc16>* v, uint32_t code_unit) {
+namespace {
+
+void push_code_unit(ZoneVector<base::uc16>* v, uint32_t code_unit) {
if (code_unit <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
v->push_back(code_unit);
} else {
@@ -760,8 +1151,12 @@ static void push_code_unit(ZoneVector<base::uc16>* v, uint32_t code_unit) {
}
}
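
push_code_unit is the encoding counterpart of the surrogate handling in ReadNext: code points above 0xFFFF are appended as a lead/trail surrogate pair. The conversion is standard UTF-16; a standalone sketch using std::u16string in place of the ZoneVector:

```cpp
#include <cassert>
#include <cstdint>
#include <string>

// Standard UTF-16 encoding of one code point, mirroring what push_code_unit
// does with a ZoneVector<base::uc16>.
void PushCodeUnit(std::u16string* v, uint32_t code_point) {
  if (code_point <= 0xFFFF) {
    v->push_back(static_cast<char16_t>(code_point));
  } else {
    uint32_t u = code_point - 0x10000;
    v->push_back(static_cast<char16_t>(0xD800 + (u >> 10)));    // lead
    v->push_back(static_cast<char16_t>(0xDC00 + (u & 0x3FF)));  // trail
  }
}

int main() {
  std::u16string out;
  PushCodeUnit(&out, 0x41);     // 'A'
  PushCodeUnit(&out, 0x1F600);  // encodes as 0xD83D 0xDE00
  assert(out.size() == 3);
  assert(out[1] == 0xD83D && out[2] == 0xDE00);
}
```
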
-const ZoneVector<base::uc16>* RegExpParser::ParseCaptureGroupName() {
- ZoneVector<base::uc16>* name = zone()->New<ZoneVector<base::uc16>>(zone());
+} // namespace
+
+template <class CharT>
+const ZoneVector<base::uc16>* RegExpParserImpl<CharT>::ParseCaptureGroupName() {
+ ZoneVector<base::uc16>* name =
+ zone()->template New<ZoneVector<base::uc16>>(zone());
bool at_start = true;
while (true) {
@@ -805,8 +1200,9 @@ const ZoneVector<base::uc16>* RegExpParser::ParseCaptureGroupName() {
return name;
}
-bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name,
- int index) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::CreateNamedCaptureAtIndex(
+ const ZoneVector<base::uc16>* name, int index) {
DCHECK(0 < index && index <= captures_started_);
DCHECK_NOT_NULL(name);
@@ -817,7 +1213,8 @@ bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name,
if (named_captures_ == nullptr) {
named_captures_ =
- zone_->New<ZoneSet<RegExpCapture*, RegExpCaptureNameLess>>(zone());
+ zone_->template New<ZoneSet<RegExpCapture*, RegExpCaptureNameLess>>(
+ zone());
} else {
// Check for duplicates and bail if we find any.
@@ -833,8 +1230,9 @@ bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name,
return true;
}
-bool RegExpParser::ParseNamedBackReference(RegExpBuilder* builder,
- RegExpParserState* state) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseNamedBackReference(
+ RegExpBuilder* builder, RegExpParserState* state) {
// The parser is assumed to be on the '<' in \k<name>.
if (current() != '<') {
ReportError(RegExpError::kInvalidNamedReference);
@@ -851,14 +1249,14 @@ bool RegExpParser::ParseNamedBackReference(RegExpBuilder* builder,
builder->AddEmpty();
} else {
RegExpBackReference* atom =
- zone()->New<RegExpBackReference>(builder->flags());
+ zone()->template New<RegExpBackReference>(builder->flags());
atom->set_name(name);
builder->AddAtom(atom);
if (named_back_references_ == nullptr) {
named_back_references_ =
- zone()->New<ZoneList<RegExpBackReference*>>(1, zone());
+ zone()->template New<ZoneList<RegExpBackReference*>>(1, zone());
}
named_back_references_->Add(atom, zone());
}
@@ -866,7 +1264,8 @@ bool RegExpParser::ParseNamedBackReference(RegExpBuilder* builder,
return true;
}
-void RegExpParser::PatchNamedBackReferences() {
+template <class CharT>
+void RegExpParserImpl<CharT>::PatchNamedBackReferences() {
if (named_back_references_ == nullptr) return;
if (named_captures_ == nullptr) {
@@ -882,7 +1281,8 @@ void RegExpParser::PatchNamedBackReferences() {
// Capture used to search the named_captures_ by name, index of the
// capture is never used.
static const int kInvalidIndex = 0;
- RegExpCapture* search_capture = zone()->New<RegExpCapture>(kInvalidIndex);
+ RegExpCapture* search_capture =
+ zone()->template New<RegExpCapture>(kInvalidIndex);
DCHECK_NULL(search_capture->name());
search_capture->set_name(ref->name());
@@ -899,70 +1299,36 @@ void RegExpParser::PatchNamedBackReferences() {
}
}
-RegExpCapture* RegExpParser::GetCapture(int index) {
+template <class CharT>
+RegExpCapture* RegExpParserImpl<CharT>::GetCapture(int index) {
// The index for the capture groups are one-based. Its index in the list is
// zero-based.
int know_captures =
is_scanned_for_captures_ ? capture_count_ : captures_started_;
DCHECK(index <= know_captures);
if (captures_ == nullptr) {
- captures_ = zone()->New<ZoneList<RegExpCapture*>>(know_captures, zone());
+ captures_ =
+ zone()->template New<ZoneList<RegExpCapture*>>(know_captures, zone());
}
while (captures_->length() < know_captures) {
- captures_->Add(zone()->New<RegExpCapture>(captures_->length() + 1), zone());
+ captures_->Add(zone()->template New<RegExpCapture>(captures_->length() + 1),
+ zone());
}
return captures_->at(index - 1);
}
-namespace {
-
-struct RegExpCaptureIndexLess {
- bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
- DCHECK_NOT_NULL(lhs);
- DCHECK_NOT_NULL(rhs);
- return lhs->index() < rhs->index();
- }
-};
-
-} // namespace
-
-Handle<FixedArray> RegExpParser::CreateCaptureNameMap() {
+template <class CharT>
+ZoneVector<RegExpCapture*>* RegExpParserImpl<CharT>::GetNamedCaptures() const {
if (named_captures_ == nullptr || named_captures_->empty()) {
- return Handle<FixedArray>();
+ return nullptr;
}
- // Named captures are sorted by name (because the set is used to ensure
- // name uniqueness). But the capture name map must to be sorted by index.
-
- ZoneVector<RegExpCapture*> sorted_named_captures(
+ return zone()->template New<ZoneVector<RegExpCapture*>>(
named_captures_->begin(), named_captures_->end(), zone());
- std::sort(sorted_named_captures.begin(), sorted_named_captures.end(),
- RegExpCaptureIndexLess{});
- DCHECK_EQ(sorted_named_captures.size(), named_captures_->size());
-
- Factory* factory = isolate()->factory();
-
- int len = static_cast<int>(sorted_named_captures.size()) * 2;
- Handle<FixedArray> array = factory->NewFixedArray(len);
-
- int i = 0;
- for (const auto& capture : sorted_named_captures) {
- base::Vector<const base::uc16> capture_name(capture->name()->data(),
- capture->name()->size());
- // CSA code in ConstructNewResultFromMatchInfo requires these strings to be
- // internalized so they can be used as property names in the 'exec' results.
- Handle<String> name = factory->InternalizeString(capture_name);
- array->set(i * 2, *name);
- array->set(i * 2 + 1, Smi::FromInt(capture->index()));
-
- i++;
- }
- DCHECK_EQ(i * 2, len);
-
- return array;
}
-bool RegExpParser::HasNamedCaptures() {
+template <class CharT>
+bool RegExpParserImpl<CharT>::HasNamedCaptures() {
if (has_named_captures_ || is_scanned_for_captures_) {
return has_named_captures_;
}
@@ -972,27 +1338,6 @@ bool RegExpParser::HasNamedCaptures() {
return has_named_captures_;
}
-bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(int index) {
- for (RegExpParserState* s = this; s != nullptr; s = s->previous_state()) {
- if (s->group_type() != CAPTURE) continue;
- // Return true if we found the matching capture index.
- if (index == s->capture_index()) return true;
- // Abort if index is larger than what has been parsed up till this state.
- if (index > s->capture_index()) return false;
- }
- return false;
-}
-
-bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(
- const ZoneVector<base::uc16>* name) {
- DCHECK_NOT_NULL(name);
- for (RegExpParserState* s = this; s != nullptr; s = s->previous_state()) {
- if (s->capture_name() == nullptr) continue;
- if (*s->capture_name() == *name) return true;
- }
- return false;
-}
-
// QuantifierPrefix ::
// { DecimalDigits }
// { DecimalDigits , }
@@ -1000,7 +1345,9 @@ bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(
//
// Returns true if parsing succeeds, and set the min_out and max_out
// values. Values are truncated to RegExpTree::kInfinity if they overflow.
-bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseIntervalQuantifier(int* min_out,
+ int* max_out) {
DCHECK_EQ(current(), '{');
int start = position();
Advance();
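
ParseIntervalQuantifier reads the {n}, {n,} and {n,m} forms listed above and saturates oversized values instead of failing. A standalone sketch of that parse, with kInfinity standing in for RegExpTree::kInfinity and the error handling simplified:

```cpp
#include <cassert>
#include <cctype>
#include <climits>
#include <string>

constexpr int kInfinity = INT_MAX;  // stand-in for RegExpTree::kInfinity

// Parses "{n}", "{n,}" or "{n,m}" starting at *pos. Values saturate at
// kInfinity instead of overflowing. Returns false (leaving *pos unchanged)
// if the text is not a well-formed interval quantifier.
bool ParseIntervalQuantifier(const std::string& s, size_t* pos, int* min_out,
                             int* max_out) {
  size_t i = *pos;
  if (i >= s.size() || s[i] != '{') return false;
  ++i;
  auto read_int = [&s, &i](int* out) {
    if (i >= s.size() || !std::isdigit(static_cast<unsigned char>(s[i])))
      return false;
    long long value = 0;
    while (i < s.size() && std::isdigit(static_cast<unsigned char>(s[i]))) {
      value = value * 10 + (s[i] - '0');
      if (value > kInfinity) value = kInfinity;  // saturate, keep consuming
      ++i;
    }
    *out = static_cast<int>(value);
    return true;
  };
  int min = 0, max = 0;
  if (!read_int(&min)) return false;
  if (i < s.size() && s[i] == '}') {
    max = min;  // {n}
  } else if (i < s.size() && s[i] == ',') {
    ++i;
    if (i < s.size() && s[i] == '}') {
      max = kInfinity;  // {n,}
    } else if (!read_int(&max) || i >= s.size() || s[i] != '}') {
      return false;
    }
  } else {
    return false;
  }
  *pos = i + 1;  // consume '}'
  *min_out = min;
  *max_out = max;
  return true;
}

int main() {
  size_t pos = 0;
  int min = 0, max = 0;
  assert(ParseIntervalQuantifier("{2,5}", &pos, &min, &max) && min == 2 && max == 5);
  pos = 0;
  assert(ParseIntervalQuantifier("{3,}", &pos, &min, &max) && max == kInfinity);
  pos = 0;
  assert(!ParseIntervalQuantifier("{,3}", &pos, &min, &max));
}
```
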
@@ -1059,7 +1406,8 @@ bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
return true;
}
-base::uc32 RegExpParser::ParseOctalLiteral() {
+template <class CharT>
+base::uc32 RegExpParserImpl<CharT>::ParseOctalLiteral() {
DCHECK(('0' <= current() && current() <= '7') || current() == kEndMarker);
// For compatibility with some other browsers (not all), we parse
// up to three octal digits with a value below 256.
@@ -1077,7 +1425,8 @@ base::uc32 RegExpParser::ParseOctalLiteral() {
return value;
}
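
The legacy octal escape follows the Annex B style rule quoted in the comment: at most three octal digits are consumed, and only while the accumulated value stays below 256. A standalone sketch of that rule (the helper is illustrative, not the V8 function):

```cpp
#include <cassert>
#include <string>

// Sketch of the legacy octal escape rule: consume up to three octal digits
// as long as the accumulated value stays below 256.
int ParseOctalLiteral(const std::string& s, size_t* pos) {
  int value = 0;
  int digits = 0;
  size_t i = *pos;
  while (i < s.size() && digits < 3 && s[i] >= '0' && s[i] <= '7') {
    int next = value * 8 + (s[i] - '0');
    if (next >= 256) break;  // a further digit may not push the value past 255
    value = next;
    ++i;
    ++digits;
  }
  *pos = i;
  return value;
}

int main() {
  size_t pos = 0;
  assert(ParseOctalLiteral("101", &pos) == 65 && pos == 3);  // "\101" is 'A'
  pos = 0;
  assert(ParseOctalLiteral("477", &pos) == 39 && pos == 2);  // stops after "47"
  pos = 0;
  assert(ParseOctalLiteral("8", &pos) == 0 && pos == 0);     // no octal digits
}
```
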
-bool RegExpParser::ParseHexEscape(int length, base::uc32* value) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseHexEscape(int length, base::uc32* value) {
int start = position();
base::uc32 val = 0;
for (int i = 0; i < length; ++i) {
@@ -1095,7 +1444,8 @@ bool RegExpParser::ParseHexEscape(int length, base::uc32* value) {
}
// This parses RegExpUnicodeEscapeSequence as described in ECMA262.
-bool RegExpParser::ParseUnicodeEscape(base::uc32* value) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseUnicodeEscape(base::uc32* value) {
// Accept both \uxxxx and \u{xxxxxx} (if harmony unicode escapes are
// allowed). In the latter case, the number of hex digits between { } is
// arbitrary. \ and u have already been read.
@@ -1308,10 +1658,11 @@ bool IsUnicodePropertyValueCharacter(char c) {
return (c == '_');
}
-} // anonymous namespace
+} // namespace
-bool RegExpParser::ParsePropertyClassName(ZoneVector<char>* name_1,
- ZoneVector<char>* name_2) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParsePropertyClassName(ZoneVector<char>* name_1,
+ ZoneVector<char>* name_2) {
DCHECK(name_1->empty());
DCHECK(name_2->empty());
// Parse the property class as follows:
@@ -1348,10 +1699,10 @@ bool RegExpParser::ParsePropertyClassName(ZoneVector<char>* name_1,
return true;
}
-bool RegExpParser::AddPropertyClassRange(ZoneList<CharacterRange>* add_to,
- bool negate,
- const ZoneVector<char>& name_1,
- const ZoneVector<char>& name_2) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::AddPropertyClassRange(
+ ZoneList<CharacterRange>* add_to, bool negate,
+ const ZoneVector<char>& name_1, const ZoneVector<char>& name_2) {
if (name_2.empty()) {
// First attempt to interpret as general category property value name.
const char* name = name_1.data();
@@ -1388,11 +1739,13 @@ bool RegExpParser::AddPropertyClassRange(ZoneList<CharacterRange>* add_to,
}
}
-RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name_1) {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::GetPropertySequence(
+ const ZoneVector<char>& name_1) {
if (!FLAG_harmony_regexp_sequence) return nullptr;
const char* name = name_1.data();
const base::uc32* sequence_list = nullptr;
- JSRegExp::Flags flags = JSRegExp::kUnicode;
+ RegExpFlags flags = RegExpFlag::kUnicode;
if (NameEquals(name, "Emoji_Flag_Sequence")) {
sequence_list = UnicodePropertySequences::kEmojiFlagSequences;
} else if (NameEquals(name, "Emoji_Tag_Sequence")) {
@@ -1421,12 +1774,12 @@ RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name_1) {
// emoji_keycap_sequence := [0-9#*] \x{FE0F 20E3}
RegExpBuilder builder(zone(), flags);
ZoneList<CharacterRange>* prefix_ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
prefix_ranges->Add(CharacterRange::Range('0', '9'), zone());
prefix_ranges->Add(CharacterRange::Singleton('#'), zone());
prefix_ranges->Add(CharacterRange::Singleton('*'), zone());
builder.AddCharacterClass(
- zone()->New<RegExpCharacterClass>(zone(), prefix_ranges));
+ zone()->template New<RegExpCharacterClass>(zone(), prefix_ranges));
builder.AddCharacter(0xFE0F);
builder.AddCharacter(0x20E3);
return builder.ToRegExp();
@@ -1435,17 +1788,17 @@ RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name_1) {
// emoji_modifier_sequence := emoji_modifier_base emoji_modifier
RegExpBuilder builder(zone(), flags);
ZoneList<CharacterRange>* modifier_base_ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
LookupPropertyValueName(UCHAR_EMOJI_MODIFIER_BASE, "Y", false,
modifier_base_ranges, zone());
- builder.AddCharacterClass(
- zone()->New<RegExpCharacterClass>(zone(), modifier_base_ranges));
+ builder.AddCharacterClass(zone()->template New<RegExpCharacterClass>(
+ zone(), modifier_base_ranges));
ZoneList<CharacterRange>* modifier_ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
LookupPropertyValueName(UCHAR_EMOJI_MODIFIER, "Y", false, modifier_ranges,
zone());
builder.AddCharacterClass(
- zone()->New<RegExpCharacterClass>(zone(), modifier_ranges));
+ zone()->template New<RegExpCharacterClass>(zone(), modifier_ranges));
return builder.ToRegExp();
}
@@ -1454,26 +1807,30 @@ RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name_1) {
#else // V8_INTL_SUPPORT
-bool RegExpParser::ParsePropertyClassName(ZoneVector<char>* name_1,
- ZoneVector<char>* name_2) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParsePropertyClassName(ZoneVector<char>* name_1,
+ ZoneVector<char>* name_2) {
return false;
}
-bool RegExpParser::AddPropertyClassRange(ZoneList<CharacterRange>* add_to,
- bool negate,
- const ZoneVector<char>& name_1,
- const ZoneVector<char>& name_2) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::AddPropertyClassRange(
+ ZoneList<CharacterRange>* add_to, bool negate,
+ const ZoneVector<char>& name_1, const ZoneVector<char>& name_2) {
return false;
}
-RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name) {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::GetPropertySequence(
+ const ZoneVector<char>& name) {
return nullptr;
}
#endif // V8_INTL_SUPPORT
-bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value,
- base::uc32* value) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseUnlimitedLengthHexNumber(int max_value,
+ base::uc32* value) {
base::uc32 x = 0;
int d = base::HexValue(current());
if (d < 0) {
@@ -1491,7 +1848,8 @@ bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value,
return true;
}
-base::uc32 RegExpParser::ParseClassCharacterEscape() {
+template <class CharT>
+base::uc32 RegExpParserImpl<CharT>::ParseClassCharacterEscape() {
DCHECK_EQ('\\', current());
DCHECK(has_next() && !IsSpecialClassEscape(Next()));
Advance();
@@ -1608,11 +1966,11 @@ base::uc32 RegExpParser::ParseClassCharacterEscape() {
UNREACHABLE();
}
-void RegExpParser::ParseClassEscape(ZoneList<CharacterRange>* ranges,
- Zone* zone,
- bool add_unicode_case_equivalents,
- base::uc32* char_out,
- bool* is_class_escape) {
+template <class CharT>
+void RegExpParserImpl<CharT>::ParseClassEscape(
+ ZoneList<CharacterRange>* ranges, Zone* zone,
+ bool add_unicode_case_equivalents, base::uc32* char_out,
+ bool* is_class_escape) {
base::uc32 current_char = current();
if (current_char == '\\') {
switch (Next()) {
@@ -1658,7 +2016,9 @@ void RegExpParser::ParseClassEscape(ZoneList<CharacterRange>* ranges,
}
}
-RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::ParseCharacterClass(
+ const RegExpBuilder* builder) {
DCHECK_EQ(current(), '[');
Advance();
bool is_negated = false;
@@ -1667,7 +2027,7 @@ RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
Advance();
}
ZoneList<CharacterRange>* ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
bool add_unicode_case_equivalents = unicode() && builder->ignore_case();
while (has_more() && current() != ']') {
base::uc32 char_1, char_2;
@@ -1713,15 +2073,14 @@ RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
Advance();
RegExpCharacterClass::CharacterClassFlags character_class_flags;
if (is_negated) character_class_flags = RegExpCharacterClass::NEGATED;
- return zone()->New<RegExpCharacterClass>(zone(), ranges,
- character_class_flags);
+ return zone()->template New<RegExpCharacterClass>(zone(), ranges,
+ character_class_flags);
}
-
#undef CHECK_FAILED
-bool RegExpParser::Parse(RegExpCompileData* result,
- const DisallowGarbageCollection&) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::Parse(RegExpCompileData* result) {
DCHECK(result != nullptr);
RegExpTree* tree = ParsePattern();
if (failed()) {
@@ -1742,35 +2101,12 @@ bool RegExpParser::Parse(RegExpCompileData* result,
result->simple = tree->IsAtom() && simple() && capture_count == 0;
result->contains_anchor = contains_anchor();
result->capture_count = capture_count;
+ result->named_captures = GetNamedCaptures();
}
return !failed();
}
-bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
- FlatStringReader* input, JSRegExp::Flags flags,
- RegExpCompileData* result) {
- RegExpParser parser(input, flags, isolate, zone);
- bool success;
- {
- DisallowGarbageCollection no_gc;
- success = parser.Parse(result, no_gc);
- }
- if (success) {
- result->capture_name_map = parser.CreateCaptureNameMap();
- }
- return success;
-}
-
-bool RegExpParser::VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
- FlatStringReader* input,
- JSRegExp::Flags flags,
- RegExpCompileData* result,
- const DisallowGarbageCollection& no_gc) {
- RegExpParser parser(input, flags, isolate, zone);
- return parser.Parse(result, no_gc);
-}
-
-RegExpBuilder::RegExpBuilder(Zone* zone, JSRegExp::Flags flags)
+RegExpBuilder::RegExpBuilder(Zone* zone, RegExpFlags flags)
: zone_(zone),
pending_empty_(false),
flags_(flags),
@@ -2054,5 +2390,58 @@ bool RegExpBuilder::AddQuantifierToAtom(
return true;
}
+template class RegExpParserImpl<uint8_t>;
+template class RegExpParserImpl<base::uc16>;
+
+} // namespace
+
+// static
+bool RegExpParser::ParseRegExpFromHeapString(Isolate* isolate, Zone* zone,
+ Handle<String> input,
+ RegExpFlags flags,
+ RegExpCompileData* result) {
+ DisallowGarbageCollection no_gc;
+ uintptr_t stack_limit = isolate->stack_guard()->real_climit();
+ String::FlatContent content = input->GetFlatContent(no_gc);
+ if (content.IsOneByte()) {
+ base::Vector<const uint8_t> v = content.ToOneByteVector();
+ return RegExpParserImpl<uint8_t>{v.begin(), v.length(), flags,
+ stack_limit, zone, no_gc}
+ .Parse(result);
+ } else {
+ base::Vector<const base::uc16> v = content.ToUC16Vector();
+ return RegExpParserImpl<base::uc16>{v.begin(), v.length(), flags,
+ stack_limit, zone, no_gc}
+ .Parse(result);
+ }
+}
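
ParseRegExpFromHeapString flattens the string once and then instantiates the parser for the concrete character width, so the parsing loops are compiled separately for one-byte and two-byte input. A standalone sketch of that dispatch pattern, with a trivial placeholder parser in place of RegExpParserImpl:

```cpp
#include <cassert>
#include <cstdint>
#include <string>

// Placeholder "parser" templated on the character type, standing in for
// RegExpParserImpl<CharT>; it just counts '(' to show per-width instantiation.
template <class CharT>
class CountingParser {
 public:
  CountingParser(const CharT* input, int length)
      : input_(input), length_(length) {}
  int CountOpenParens() const {
    int n = 0;
    for (int i = 0; i < length_; i++) {
      if (input_[i] == static_cast<CharT>('(')) n++;
    }
    return n;
  }

 private:
  const CharT* const input_;
  const int length_;
};

// Front end that picks the instantiation based on the string's width,
// analogous to the content.IsOneByte() branch above.
int CountOpenParens(const std::string& one_byte) {
  return CountingParser<uint8_t>(
             reinterpret_cast<const uint8_t*>(one_byte.data()),
             static_cast<int>(one_byte.size()))
      .CountOpenParens();
}
int CountOpenParens(const std::u16string& two_byte) {
  return CountingParser<char16_t>(two_byte.data(),
                                  static_cast<int>(two_byte.size()))
      .CountOpenParens();
}

int main() {
  assert(CountOpenParens(std::string("(a)(b)")) == 2);
  assert(CountOpenParens(std::u16string(u"(x)")) == 1);
}
```
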
+
+// static
+template <class CharT>
+bool RegExpParser::VerifyRegExpSyntax(Zone* zone, uintptr_t stack_limit,
+ const CharT* input, int input_length,
+ RegExpFlags flags,
+ RegExpCompileData* result,
+ const DisallowGarbageCollection& no_gc) {
+ return RegExpParserImpl<CharT>{input, input_length, flags,
+ stack_limit, zone, no_gc}
+ .Parse(result);
+}
+
+template bool RegExpParser::VerifyRegExpSyntax<uint8_t>(
+ Zone*, uintptr_t, const uint8_t*, int, RegExpFlags, RegExpCompileData*,
+ const DisallowGarbageCollection&);
+template bool RegExpParser::VerifyRegExpSyntax<base::uc16>(
+ Zone*, uintptr_t, const base::uc16*, int, RegExpFlags, RegExpCompileData*,
+ const DisallowGarbageCollection&);
+
+// static
+bool RegExpParser::VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
+ Handle<String> input, RegExpFlags flags,
+ RegExpCompileData* result,
+ const DisallowGarbageCollection&) {
+ return ParseRegExpFromHeapString(isolate, zone, input, flags, result);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index 3766d43fb5..4fc6400297 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -5,367 +5,35 @@
#ifndef V8_REGEXP_REGEXP_PARSER_H_
#define V8_REGEXP_REGEXP_PARSER_H_
-#include "src/base/strings.h"
-#include "src/objects/js-regexp.h"
-#include "src/objects/objects.h"
-#include "src/regexp/regexp-ast.h"
-#include "src/regexp/regexp-error.h"
-#include "src/zone/zone.h"
+#include "src/common/assert-scope.h"
+#include "src/handles/handles.h"
+#include "src/regexp/regexp-flags.h"
namespace v8 {
namespace internal {
-struct RegExpCompileData;
-
-// A BufferedZoneList is an automatically growing list, just like (and backed
-// by) a ZoneList, that is optimized for the case of adding and removing
-// a single element. The last element added is stored outside the backing list,
-// and if no more than one element is ever added, the ZoneList isn't even
-// allocated.
-// Elements must not be nullptr pointers.
-template <typename T, int initial_size>
-class BufferedZoneList {
- public:
- BufferedZoneList() : list_(nullptr), last_(nullptr) {}
-
- // Adds element at end of list. This element is buffered and can
- // be read using last() or removed using RemoveLast until a new Add or until
- // RemoveLast or GetList has been called.
- void Add(T* value, Zone* zone) {
- if (last_ != nullptr) {
- if (list_ == nullptr) {
- list_ = zone->New<ZoneList<T*>>(initial_size, zone);
- }
- list_->Add(last_, zone);
- }
- last_ = value;
- }
-
- T* last() {
- DCHECK(last_ != nullptr);
- return last_;
- }
-
- T* RemoveLast() {
- DCHECK(last_ != nullptr);
- T* result = last_;
- if ((list_ != nullptr) && (list_->length() > 0))
- last_ = list_->RemoveLast();
- else
- last_ = nullptr;
- return result;
- }
-
- T* Get(int i) {
- DCHECK((0 <= i) && (i < length()));
- if (list_ == nullptr) {
- DCHECK_EQ(0, i);
- return last_;
- } else {
- if (i == list_->length()) {
- DCHECK(last_ != nullptr);
- return last_;
- } else {
- return list_->at(i);
- }
- }
- }
-
- void Clear() {
- list_ = nullptr;
- last_ = nullptr;
- }
-
- int length() {
- int length = (list_ == nullptr) ? 0 : list_->length();
- return length + ((last_ == nullptr) ? 0 : 1);
- }
-
- ZoneList<T*>* GetList(Zone* zone) {
- if (list_ == nullptr) {
- list_ = zone->New<ZoneList<T*>>(initial_size, zone);
- }
- if (last_ != nullptr) {
- list_->Add(last_, zone);
- last_ = nullptr;
- }
- return list_;
- }
-
- private:
- ZoneList<T*>* list_;
- T* last_;
-};
-
+class String;
+class Zone;
-// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
-class RegExpBuilder : public ZoneObject {
- public:
- RegExpBuilder(Zone* zone, JSRegExp::Flags flags);
- void AddCharacter(base::uc16 character);
- void AddUnicodeCharacter(base::uc32 character);
- void AddEscapedUnicodeCharacter(base::uc32 character);
- // "Adds" an empty expression. Does nothing except consume a
- // following quantifier
- void AddEmpty();
- void AddCharacterClass(RegExpCharacterClass* cc);
- void AddCharacterClassForDesugaring(base::uc32 c);
- void AddAtom(RegExpTree* tree);
- void AddTerm(RegExpTree* tree);
- void AddAssertion(RegExpTree* tree);
- void NewAlternative(); // '|'
- bool AddQuantifierToAtom(int min, int max,
- RegExpQuantifier::QuantifierType type);
- void FlushText();
- RegExpTree* ToRegExp();
- JSRegExp::Flags flags() const { return flags_; }
- void set_flags(JSRegExp::Flags flags) { flags_ = flags; }
-
- bool ignore_case() const { return (flags_ & JSRegExp::kIgnoreCase) != 0; }
- bool multiline() const { return (flags_ & JSRegExp::kMultiline) != 0; }
- bool dotall() const { return (flags_ & JSRegExp::kDotAll) != 0; }
-
- private:
- static const base::uc16 kNoPendingSurrogate = 0;
- void AddLeadSurrogate(base::uc16 lead_surrogate);
- void AddTrailSurrogate(base::uc16 trail_surrogate);
- void FlushPendingSurrogate();
- void FlushCharacters();
- void FlushTerms();
- bool NeedsDesugaringForUnicode(RegExpCharacterClass* cc);
- bool NeedsDesugaringForIgnoreCase(base::uc32 c);
- Zone* zone() const { return zone_; }
- bool unicode() const { return (flags_ & JSRegExp::kUnicode) != 0; }
-
- Zone* zone_;
- bool pending_empty_;
- JSRegExp::Flags flags_;
- ZoneList<base::uc16>* characters_;
- base::uc16 pending_surrogate_;
- BufferedZoneList<RegExpTree, 2> terms_;
- BufferedZoneList<RegExpTree, 2> text_;
- BufferedZoneList<RegExpTree, 2> alternatives_;
-#ifdef DEBUG
- enum { ADD_NONE, ADD_CHAR, ADD_TERM, ADD_ASSERT, ADD_ATOM } last_added_;
-#define LAST(x) last_added_ = x;
-#else
-#define LAST(x)
-#endif
-};
+struct RegExpCompileData;
-class V8_EXPORT_PRIVATE RegExpParser {
+class V8_EXPORT_PRIVATE RegExpParser : public AllStatic {
public:
- RegExpParser(FlatStringReader* in, JSRegExp::Flags flags, Isolate* isolate,
- Zone* zone);
+ static bool ParseRegExpFromHeapString(Isolate* isolate, Zone* zone,
+ Handle<String> input, RegExpFlags flags,
+ RegExpCompileData* result);
- static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
- JSRegExp::Flags flags, RegExpCompileData* result);
+ template <class CharT>
+ static bool VerifyRegExpSyntax(Zone* zone, uintptr_t stack_limit,
+ const CharT* input, int input_length,
+ RegExpFlags flags, RegExpCompileData* result,
+ const DisallowGarbageCollection& no_gc);
// Used by the SpiderMonkey embedding of irregexp.
static bool VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
- FlatStringReader* input, JSRegExp::Flags flags,
+ Handle<String> input, RegExpFlags flags,
RegExpCompileData* result,
- const DisallowGarbageCollection& nogc);
-
- private:
- bool Parse(RegExpCompileData* result, const DisallowGarbageCollection&);
-
- RegExpTree* ParsePattern();
- RegExpTree* ParseDisjunction();
- RegExpTree* ParseGroup();
-
- // Parses a {...,...} quantifier and stores the range in the given
- // out parameters.
- bool ParseIntervalQuantifier(int* min_out, int* max_out);
-
- // Parses and returns a single escaped character. The character
- // must not be 'b' or 'B' since they are usually handle specially.
- base::uc32 ParseClassCharacterEscape();
-
- // Checks whether the following is a length-digit hexadecimal number,
- // and sets the value if it is.
- bool ParseHexEscape(int length, base::uc32* value);
- bool ParseUnicodeEscape(base::uc32* value);
- bool ParseUnlimitedLengthHexNumber(int max_value, base::uc32* value);
-
- bool ParsePropertyClassName(ZoneVector<char>* name_1,
- ZoneVector<char>* name_2);
- bool AddPropertyClassRange(ZoneList<CharacterRange>* add_to, bool negate,
- const ZoneVector<char>& name_1,
- const ZoneVector<char>& name_2);
-
- RegExpTree* GetPropertySequence(const ZoneVector<char>& name_1);
- RegExpTree* ParseCharacterClass(const RegExpBuilder* state);
-
- base::uc32 ParseOctalLiteral();
-
- // Tries to parse the input as a back reference. If successful it
- // stores the result in the output parameter and returns true. If
- // it fails it will push back the characters read so the same characters
- // can be reparsed.
- bool ParseBackReferenceIndex(int* index_out);
-
- // Parse inside a class. Either add escaped class to the range, or return
- // false and pass parsed single character through |char_out|.
- void ParseClassEscape(ZoneList<CharacterRange>* ranges, Zone* zone,
- bool add_unicode_case_equivalents, base::uc32* char_out,
- bool* is_class_escape);
-
- char ParseClassEscape();
-
- RegExpTree* ReportError(RegExpError error);
- void Advance();
- void Advance(int dist);
- void Reset(int pos);
-
- // Reports whether the pattern might be used as a literal search string.
- // Only use if the result of the parse is a single atom node.
- bool simple();
- bool contains_anchor() { return contains_anchor_; }
- void set_contains_anchor() { contains_anchor_ = true; }
- int captures_started() { return captures_started_; }
- int position() { return next_pos_ - 1; }
- bool failed() { return failed_; }
- // The Unicode flag can't be changed using in-regexp syntax, so it's OK to
- // just read the initial flag value here.
- bool unicode() const { return (top_level_flags_ & JSRegExp::kUnicode) != 0; }
-
- static bool IsSyntaxCharacterOrSlash(base::uc32 c);
-
- static const base::uc32 kEndMarker = (1 << 21);
-
- private:
- enum SubexpressionType {
- INITIAL,
- CAPTURE, // All positive values represent captures.
- POSITIVE_LOOKAROUND,
- NEGATIVE_LOOKAROUND,
- GROUPING
- };
-
- class RegExpParserState : public ZoneObject {
- public:
- // Push a state on the stack.
- RegExpParserState(RegExpParserState* previous_state,
- SubexpressionType group_type,
- RegExpLookaround::Type lookaround_type,
- int disjunction_capture_index,
- const ZoneVector<base::uc16>* capture_name,
- JSRegExp::Flags flags, Zone* zone)
- : previous_state_(previous_state),
- builder_(zone->New<RegExpBuilder>(zone, flags)),
- group_type_(group_type),
- lookaround_type_(lookaround_type),
- disjunction_capture_index_(disjunction_capture_index),
- capture_name_(capture_name) {}
- // Parser state of containing expression, if any.
- RegExpParserState* previous_state() const { return previous_state_; }
- bool IsSubexpression() { return previous_state_ != nullptr; }
- // RegExpBuilder building this regexp's AST.
- RegExpBuilder* builder() const { return builder_; }
- // Type of regexp being parsed (parenthesized group or entire regexp).
- SubexpressionType group_type() const { return group_type_; }
- // Lookahead or Lookbehind.
- RegExpLookaround::Type lookaround_type() const { return lookaround_type_; }
- // Index in captures array of first capture in this sub-expression, if any.
- // Also the capture index of this sub-expression itself, if group_type
- // is CAPTURE.
- int capture_index() const { return disjunction_capture_index_; }
- // The name of the current sub-expression, if group_type is CAPTURE. Only
- // used for named captures.
- const ZoneVector<base::uc16>* capture_name() const { return capture_name_; }
-
- bool IsNamedCapture() const { return capture_name_ != nullptr; }
-
- // Check whether the parser is inside a capture group with the given index.
- bool IsInsideCaptureGroup(int index);
- // Check whether the parser is inside a capture group with the given name.
- bool IsInsideCaptureGroup(const ZoneVector<base::uc16>* name);
-
- private:
- // Linked list implementation of stack of states.
- RegExpParserState* const previous_state_;
- // Builder for the stored disjunction.
- RegExpBuilder* const builder_;
- // Stored disjunction type (capture, look-ahead or grouping), if any.
- const SubexpressionType group_type_;
- // Stored read direction.
- const RegExpLookaround::Type lookaround_type_;
- // Stored disjunction's capture index (if any).
- const int disjunction_capture_index_;
- // Stored capture name (if any).
- const ZoneVector<base::uc16>* const capture_name_;
- };
-
- // Return the 1-indexed RegExpCapture object, allocate if necessary.
- RegExpCapture* GetCapture(int index);
-
- // Creates a new named capture at the specified index. Must be called exactly
- // once for each named capture. Fails if a capture with the same name is
- // encountered.
- bool CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name, int index);
-
- // Parses the name of a capture group (?<name>pattern). The name must adhere
- // to IdentifierName in the ECMAScript standard.
- const ZoneVector<base::uc16>* ParseCaptureGroupName();
-
- bool ParseNamedBackReference(RegExpBuilder* builder,
- RegExpParserState* state);
- RegExpParserState* ParseOpenParenthesis(RegExpParserState* state);
-
- // After the initial parsing pass, patch corresponding RegExpCapture objects
- // into all RegExpBackReferences. This is done after initial parsing in order
- // to avoid complicating cases in which references come before the capture.
- void PatchNamedBackReferences();
-
- Handle<FixedArray> CreateCaptureNameMap();
-
- // Returns true iff the pattern contains named captures. May call
- // ScanForCaptures to look ahead at the remaining pattern.
- bool HasNamedCaptures();
-
- Isolate* isolate() { return isolate_; }
- Zone* zone() const { return zone_; }
-
- base::uc32 current() { return current_; }
- bool has_more() { return has_more_; }
- bool has_next() { return next_pos_ < in()->length(); }
- base::uc32 Next();
- template <bool update_position>
- base::uc32 ReadNext();
- FlatStringReader* in() { return in_; }
- void ScanForCaptures();
-
- struct RegExpCaptureNameLess {
- bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
- DCHECK_NOT_NULL(lhs);
- DCHECK_NOT_NULL(rhs);
- return *lhs->name() < *rhs->name();
- }
- };
-
- Isolate* isolate_;
- Zone* zone_;
- RegExpError error_ = RegExpError::kNone;
- int error_pos_ = 0;
- ZoneList<RegExpCapture*>* captures_;
- ZoneSet<RegExpCapture*, RegExpCaptureNameLess>* named_captures_;
- ZoneList<RegExpBackReference*>* named_back_references_;
- FlatStringReader* in_;
- base::uc32 current_;
- // These are the flags specified outside the regexp syntax ie after the
- // terminating '/' or in the second argument to the constructor. The current
- // flags are stored on the RegExpBuilder.
- JSRegExp::Flags top_level_flags_;
- int next_pos_;
- int captures_started_;
- int capture_count_; // Only valid after we have scanned for captures.
- bool has_more_;
- bool simple_;
- bool contains_anchor_;
- bool is_scanned_for_captures_;
- bool has_named_captures_; // Only valid after we have scanned for captures.
- bool failed_;
+ const DisallowGarbageCollection& no_gc);
};
} // namespace internal
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 1e72a124c9..dabe5ee4a2 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -120,33 +120,6 @@ MaybeHandle<Object> RegExpUtils::RegExpExec(Isolate* isolate,
}
}
-Maybe<bool> RegExpUtils::IsRegExp(Isolate* isolate, Handle<Object> object) {
- if (!object->IsJSReceiver()) return Just(false);
-
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
-
- Handle<Object> match;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, match,
- JSObject::GetProperty(isolate, receiver,
- isolate->factory()->match_symbol()),
- Nothing<bool>());
-
- if (!match->IsUndefined(isolate)) {
- const bool match_as_boolean = match->BooleanValue(isolate);
-
- if (match_as_boolean && !object->IsJSRegExp()) {
- isolate->CountUsage(v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp);
- } else if (!match_as_boolean && object->IsJSRegExp()) {
- isolate->CountUsage(v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp);
- }
-
- return Just(match_as_boolean);
- }
-
- return Just(object->IsJSRegExp());
-}
-
bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
#ifdef V8_ENABLE_FORCE_SLOW_PATH
if (isolate->force_slow_path()) return false;
diff --git a/deps/v8/src/regexp/regexp-utils.h b/deps/v8/src/regexp/regexp-utils.h
index 19f1f24039..c0333fb170 100644
--- a/deps/v8/src/regexp/regexp-utils.h
+++ b/deps/v8/src/regexp/regexp-utils.h
@@ -5,12 +5,15 @@
#ifndef V8_REGEXP_REGEXP_UTILS_H_
#define V8_REGEXP_REGEXP_UTILS_H_
-#include "src/objects/objects.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
+class JSReceiver;
+class Object;
class RegExpMatchInfo;
+class String;
// Helper methods for C++ regexp builtins.
class RegExpUtils : public AllStatic {
@@ -31,10 +34,6 @@ class RegExpUtils : public AllStatic {
Isolate* isolate, Handle<JSReceiver> regexp, Handle<String> string,
Handle<Object> exec);
- // ES#sec-isregexp IsRegExp ( argument )
- // Includes checking of the match property.
- static Maybe<bool> IsRegExp(Isolate* isolate, Handle<Object> object);
-
// Checks whether the given object is an unmodified JSRegExp instance.
// Neither the object's map, nor its prototype's map, nor any relevant
// method on the prototype may be modified.
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
index 9bdebe1918..742c6d9999 100644
--- a/deps/v8/src/regexp/regexp.cc
+++ b/deps/v8/src/regexp/regexp.cc
@@ -37,7 +37,7 @@ class RegExpImpl final : public AllStatic {
// Prepares a JSRegExp object with Irregexp-specific data.
static void IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
int capture_count, uint32_t backtrack_limit);
// Prepare a RegExp for being executed one or more times (using
@@ -51,7 +51,7 @@ class RegExpImpl final : public AllStatic {
Handle<String> subject);
static void AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
Handle<String> match_pattern);
static int AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
@@ -90,7 +90,7 @@ class RegExpImpl final : public AllStatic {
// Returns true on success, false on failure.
static bool Compile(Isolate* isolate, Zone* zone, RegExpCompileData* input,
- JSRegExp::Flags flags, Handle<String> pattern,
+ RegExpFlags flags, Handle<String> pattern,
Handle<String> sample_subject, bool is_one_byte,
uint32_t& backtrack_limit);
@@ -102,6 +102,32 @@ class RegExpImpl final : public AllStatic {
static Code IrregexpNativeCode(FixedArray re, bool is_one_byte);
};
+// static
+bool RegExp::CanGenerateBytecode() {
+ return FLAG_regexp_interpret_all || FLAG_regexp_tier_up;
+}
+
+// static
+template <class CharT>
+bool RegExp::VerifySyntax(Zone* zone, uintptr_t stack_limit, const CharT* input,
+ int input_length, RegExpFlags flags,
+ RegExpError* regexp_error_out,
+ const DisallowGarbageCollection& no_gc) {
+ RegExpCompileData data;
+ bool pattern_is_valid = RegExpParser::VerifyRegExpSyntax(
+ zone, stack_limit, input, input_length, flags, &data, no_gc);
+ *regexp_error_out = data.error;
+ return pattern_is_valid;
+}
+
+template bool RegExp::VerifySyntax<uint8_t>(Zone*, uintptr_t, const uint8_t*,
+ int, RegExpFlags,
+ RegExpError* regexp_error_out,
+ const DisallowGarbageCollection&);
+template bool RegExp::VerifySyntax<base::uc16>(
+ Zone*, uintptr_t, const base::uc16*, int, RegExpFlags,
+ RegExpError* regexp_error_out, const DisallowGarbageCollection&);
+
MaybeHandle<Object> RegExp::ThrowRegExpException(Isolate* isolate,
Handle<JSRegExp> re,
Handle<String> pattern,
@@ -154,8 +180,7 @@ static bool HasFewDifferentCharacters(Handle<String> pattern) {
// static
MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
uint32_t backtrack_limit) {
DCHECK(pattern->IsFlat());
@@ -169,8 +194,8 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
CompilationCache* compilation_cache = nullptr;
if (is_compilation_cache_enabled) {
compilation_cache = isolate->compilation_cache();
- MaybeHandle<FixedArray> maybe_cached =
- compilation_cache->LookupRegExp(pattern, flags);
+ MaybeHandle<FixedArray> maybe_cached = compilation_cache->LookupRegExp(
+ pattern, JSRegExp::AsJSRegExpFlags(flags));
Handle<FixedArray> cached;
if (maybe_cached.ToHandle(&cached)) {
re->set_data(*cached);
@@ -180,10 +205,9 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
PostponeInterruptsScope postpone(isolate);
RegExpCompileData parse_result;
- FlatStringReader reader(isolate, pattern);
DCHECK(!isolate->has_pending_exception());
- if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
- &parse_result)) {
+ if (!RegExpParser::ParseRegExpFromHeapString(isolate, &zone, pattern, flags,
+ &parse_result)) {
// Throw an exception if we fail to parse the pattern.
return RegExp::ThrowRegExpException(isolate, re, pattern,
parse_result.error);
@@ -210,7 +234,7 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
ExperimentalRegExp::Initialize(isolate, re, pattern, flags,
parse_result.capture_count);
has_been_compiled = true;
- } else if (parse_result.simple && !IgnoreCase(flags) && !IsSticky(flags) &&
+ } else if (parse_result.simple && !IsIgnoreCase(flags) && !IsSticky(flags) &&
!HasFewDifferentCharacters(pattern)) {
// Parse-tree is a single atom that is equal to the pattern.
RegExpImpl::AtomCompile(isolate, re, pattern, flags, pattern);
@@ -225,7 +249,7 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, atom_string,
isolate->factory()->NewStringFromTwoByte(atom_pattern), Object);
- if (!IgnoreCase(flags) && !HasFewDifferentCharacters(atom_string)) {
+ if (!IsIgnoreCase(flags) && !HasFewDifferentCharacters(atom_string)) {
RegExpImpl::AtomCompile(isolate, re, pattern, flags, atom_string);
has_been_compiled = true;
}
@@ -239,7 +263,8 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
// and we can store it in the cache.
Handle<FixedArray> data(FixedArray::cast(re->data()), isolate);
if (is_compilation_cache_enabled) {
- compilation_cache->PutRegExp(pattern, flags, data);
+ compilation_cache->PutRegExp(pattern, JSRegExp::AsJSRegExpFlags(flags),
+ data);
}
return re;
@@ -301,9 +326,10 @@ MaybeHandle<Object> RegExp::Exec(Isolate* isolate, Handle<JSRegExp> regexp,
// RegExp Atom implementation: Simple string search using indexOf.
void RegExpImpl::AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
Handle<String> match_pattern) {
- isolate->factory()->SetRegExpAtomData(re, pattern, flags, match_pattern);
+ isolate->factory()->SetRegExpAtomData(
+ re, pattern, JSRegExp::AsJSRegExpFlags(flags), match_pattern);
}
static void SetAtomLastCapture(Isolate* isolate,
@@ -420,9 +446,9 @@ bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
return CompileIrregexp(isolate, re, sample_subject, is_one_byte);
}
-#ifdef DEBUG
namespace {
+#ifdef DEBUG
bool RegExpCodeIsValidForPreCompilation(Handle<JSRegExp> re, bool is_one_byte) {
Object entry = re->Code(is_one_byte);
Object bytecode = re->Bytecode(is_one_byte);
@@ -448,9 +474,50 @@ bool RegExpCodeIsValidForPreCompilation(Handle<JSRegExp> re, bool is_one_byte) {
return true;
}
+#endif
+
+struct RegExpCaptureIndexLess {
+ bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
+ DCHECK_NOT_NULL(lhs);
+ DCHECK_NOT_NULL(rhs);
+ return lhs->index() < rhs->index();
+ }
+};
} // namespace
-#endif
+
+// static
+Handle<FixedArray> RegExp::CreateCaptureNameMap(
+ Isolate* isolate, ZoneVector<RegExpCapture*>* named_captures) {
+ if (named_captures == nullptr) return Handle<FixedArray>();
+
+ DCHECK(!named_captures->empty());
+
+ // Named captures are sorted by name (because the set is used to ensure
+ // name uniqueness). But the capture name map must be sorted by index.
+
+ std::sort(named_captures->begin(), named_captures->end(),
+ RegExpCaptureIndexLess{});
+
+ int len = static_cast<int>(named_captures->size()) * 2;
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(len);
+
+ int i = 0;
+ for (const RegExpCapture* capture : *named_captures) {
+ base::Vector<const base::uc16> capture_name(capture->name()->data(),
+ capture->name()->size());
+ // CSA code in ConstructNewResultFromMatchInfo requires these strings to be
+ // internalized so they can be used as property names in the 'exec' results.
+ Handle<String> name = isolate->factory()->InternalizeString(capture_name);
+ array->set(i * 2, *name);
+ array->set(i * 2 + 1, Smi::FromInt(capture->index()));
+
+ i++;
+ }
+ DCHECK_EQ(i * 2, len);
+
+ return array;
+}
bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> sample_subject,
@@ -461,14 +528,13 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
DCHECK(RegExpCodeIsValidForPreCompilation(re, is_one_byte));
- JSRegExp::Flags flags = re->GetFlags();
+ RegExpFlags flags = JSRegExp::AsRegExpFlags(re->GetFlags());
Handle<String> pattern(re->Pattern(), isolate);
pattern = String::Flatten(isolate, pattern);
RegExpCompileData compile_data;
- FlatStringReader reader(isolate, pattern);
- if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
- &compile_data)) {
+ if (!RegExpParser::ParseRegExpFromHeapString(isolate, &zone, pattern, flags,
+ &compile_data)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
USE(RegExp::ThrowRegExpException(isolate, re, pattern, compile_data.error));
@@ -513,7 +579,9 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
BUILTIN_CODE(isolate, RegExpInterpreterTrampoline);
data->set(JSRegExp::code_index(is_one_byte), ToCodeT(*trampoline));
}
- re->SetCaptureNameMap(compile_data.capture_name_map);
+ Handle<FixedArray> capture_name_map =
+ RegExp::CreateCaptureNameMap(isolate, compile_data.named_captures);
+ re->SetCaptureNameMap(capture_name_map);
int register_max = IrregexpMaxRegisterCount(*data);
if (compile_data.register_count > register_max) {
SetIrregexpMaxRegisterCount(*data, compile_data.register_count);
@@ -553,12 +621,13 @@ Code RegExpImpl::IrregexpNativeCode(FixedArray re, bool is_one_byte) {
}
void RegExpImpl::IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags, int capture_count,
+ Handle<String> pattern, RegExpFlags flags,
+ int capture_count,
uint32_t backtrack_limit) {
// Initialize compiled code entries to null.
- isolate->factory()->SetRegExpIrregexpData(re, pattern, flags, capture_count,
- backtrack_limit);
+ isolate->factory()->SetRegExpIrregexpData(re, pattern,
+ JSRegExp::AsJSRegExpFlags(flags),
+ capture_count, backtrack_limit);
}
// static
@@ -783,7 +852,7 @@ bool TooMuchRegExpCode(Isolate* isolate, Handle<String> pattern) {
// static
bool RegExp::CompileForTesting(Isolate* isolate, Zone* zone,
- RegExpCompileData* data, JSRegExp::Flags flags,
+ RegExpCompileData* data, RegExpFlags flags,
Handle<String> pattern,
Handle<String> sample_subject,
bool is_one_byte) {
@@ -793,7 +862,7 @@ bool RegExp::CompileForTesting(Isolate* isolate, Zone* zone,
}
bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
- JSRegExp::Flags flags, Handle<String> pattern,
+ RegExpFlags flags, Handle<String> pattern,
Handle<String> sample_subject, bool is_one_byte,
uint32_t& backtrack_limit) {
if (JSRegExp::RegistersForCaptureCount(data->capture_count) >
@@ -868,6 +937,9 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
#elif V8_TARGET_ARCH_RISCV64
macro_assembler.reset(new RegExpMacroAssemblerRISCV(isolate, zone, mode,
output_register_count));
+#elif V8_TARGET_ARCH_LOONG64
+ macro_assembler.reset(new RegExpMacroAssemblerLOONG64(
+ isolate, zone, mode, output_register_count));
#else
#error "Unsupported architecture"
#endif
@@ -970,7 +1042,7 @@ RegExpGlobalCache::RegExpGlobalCache(Handle<JSRegExp> regexp,
regexp_(regexp),
subject_(subject),
isolate_(isolate) {
- DCHECK(IsGlobal(regexp->GetFlags()));
+ DCHECK(IsGlobal(JSRegExp::AsRegExpFlags(regexp->GetFlags())));
switch (regexp_->TypeTag()) {
case JSRegExp::NOT_COMPILED:
@@ -1045,7 +1117,8 @@ RegExpGlobalCache::~RegExpGlobalCache() {
}
int RegExpGlobalCache::AdvanceZeroLength(int last_index) {
- if (IsUnicode(regexp_->GetFlags()) && last_index + 1 < subject_->length() &&
+ if (IsUnicode(JSRegExp::AsRegExpFlags(regexp_->GetFlags())) &&
+ last_index + 1 < subject_->length() &&
unibrow::Utf16::IsLeadSurrogate(subject_->Get(last_index)) &&
unibrow::Utf16::IsTrailSurrogate(subject_->Get(last_index + 1))) {
// Advance over the surrogate pair.
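For reference, the capture name map built by RegExp::CreateCaptureNameMap above is a flat FixedArray of [name, index] pairs ordered by capture index (slots 2*i and 2*i+1), re-sorted from the name-ordered set the parser produces. A standalone sketch of that flattening step, using plain C++ containers rather than V8's Handle/FixedArray types (all names below are illustrative):

#include <algorithm>
#include <string>
#include <utility>
#include <vector>

struct NamedCapture {
  std::string name;
  int index;  // 1-based capture index, as assigned by the parser.
};

// Schematic equivalent of CreateCaptureNameMap: the input arrives sorted by
// name (used for the uniqueness check), so re-sort by index and flatten into
// pairs corresponding to FixedArray slots 2*i (name) and 2*i+1 (Smi index).
std::vector<std::pair<std::string, int>> FlattenCaptureNameMap(
    std::vector<NamedCapture> named_captures) {
  std::sort(named_captures.begin(), named_captures.end(),
            [](const NamedCapture& lhs, const NamedCapture& rhs) {
              return lhs.index < rhs.index;
            });
  std::vector<std::pair<std::string, int>> map;
  map.reserve(named_captures.size());
  for (const NamedCapture& capture : named_captures) {
    map.emplace_back(capture.name, capture.index);
  }
  return map;
}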
diff --git a/deps/v8/src/regexp/regexp.h b/deps/v8/src/regexp/regexp.h
index 40fe832fd7..60a240f259 100644
--- a/deps/v8/src/regexp/regexp.h
+++ b/deps/v8/src/regexp/regexp.h
@@ -5,12 +5,18 @@
#ifndef V8_REGEXP_REGEXP_H_
#define V8_REGEXP_REGEXP_H_
-#include "src/objects/js-regexp.h"
+#include "src/common/assert-scope.h"
+#include "src/handles/handles.h"
#include "src/regexp/regexp-error.h"
+#include "src/regexp/regexp-flags.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
+class JSRegExp;
+class RegExpCapture;
+class RegExpMatchInfo;
class RegExpNode;
class RegExpTree;
@@ -37,9 +43,9 @@ struct RegExpCompileData {
// True, iff the pattern is anchored at the start of the string with '^'.
bool contains_anchor = false;
- // Only use if the pattern contains named captures. If so, this contains a
- // mapping of capture names to capture indices.
- Handle<FixedArray> capture_name_map;
+ // Only set if the pattern contains named captures.
+ // Note: the lifetime equals that of the parse/compile zone.
+ ZoneVector<RegExpCapture*>* named_captures = nullptr;
// The error message. Only used if an error occurred during parsing or
// compilation.
@@ -62,9 +68,15 @@ struct RegExpCompileData {
class RegExp final : public AllStatic {
public:
// Whether the irregexp engine generates interpreter bytecode.
- static bool CanGenerateBytecode() {
- return FLAG_regexp_interpret_all || FLAG_regexp_tier_up;
- }
+ static bool CanGenerateBytecode();
+
+ // Verify the given pattern, i.e. check that parsing succeeds. If
+ // verification fails, `regexp_error_out` is set.
+ template <class CharT>
+ static bool VerifySyntax(Zone* zone, uintptr_t stack_limit,
+ const CharT* input, int input_length,
+ RegExpFlags flags, RegExpError* regexp_error_out,
+ const DisallowGarbageCollection& no_gc);
// Parses the RegExp pattern and prepares the JSRegExp object with
// generic data and choice of implementation - as well as what
@@ -72,7 +84,7 @@ class RegExp final : public AllStatic {
// Returns false if compilation fails.
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Compile(
Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
- JSRegExp::Flags flags, uint32_t backtrack_limit);
+ RegExpFlags flags, uint32_t backtrack_limit);
// Ensures that a regexp is fully compiled and ready to be executed on a
// subject string. Returns true on success. Return false on failure, and
@@ -131,12 +143,9 @@ class RegExp final : public AllStatic {
Isolate* isolate, Handle<RegExpMatchInfo> last_match_info,
Handle<String> subject, int capture_count, int32_t* match);
- V8_EXPORT_PRIVATE static bool CompileForTesting(Isolate* isolate, Zone* zone,
- RegExpCompileData* input,
- JSRegExp::Flags flags,
- Handle<String> pattern,
- Handle<String> sample_subject,
- bool is_one_byte);
+ V8_EXPORT_PRIVATE static bool CompileForTesting(
+ Isolate* isolate, Zone* zone, RegExpCompileData* input, RegExpFlags flags,
+ Handle<String> pattern, Handle<String> sample_subject, bool is_one_byte);
V8_EXPORT_PRIVATE static void DotPrintForTesting(const char* label,
RegExpNode* node);
@@ -152,6 +161,9 @@ class RegExp final : public AllStatic {
RegExpError error_text);
static bool IsUnmodifiedRegExp(Isolate* isolate, Handle<JSRegExp> regexp);
+
+ static Handle<FixedArray> CreateCaptureNameMap(
+ Isolate* isolate, ZoneVector<RegExpCapture*>* named_captures);
};
// Uses a special global mode of irregexp-generated code to perform a global
diff --git a/deps/v8/src/roots/DIR_METADATA b/deps/v8/src/roots/DIR_METADATA
index ff55846b31..af999da1f2 100644
--- a/deps/v8/src/roots/DIR_METADATA
+++ b/deps/v8/src/roots/DIR_METADATA
@@ -7,5 +7,5 @@
# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
monorail {
- component: "Blink>JavaScript>GC"
-}
\ No newline at end of file
+ component: "Blink>JavaScript>GarbageCollection"
+}
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 32a1353177..1fb80f780d 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -20,7 +20,7 @@ namespace internal {
// Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || \
- V8_TARGET_ARCH_RISCV64
+ V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
namespace {
@@ -606,6 +606,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); }
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
// || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
- // || V8_TARGET_ARCH_RISCV64
+ // || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index e03a9c06ff..7a67c78db1 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -29,7 +29,9 @@ RUNTIME_FUNCTION(Runtime_SetGrow) {
OrderedHashSet::EnsureGrowable(isolate, table);
if (!table_candidate.ToHandle(&table)) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kValueOutOfRange));
+ isolate,
+ NewRangeError(MessageTemplate::kCollectionGrowFailed,
+ isolate->factory()->NewStringFromAsciiChecked("Set")));
}
holder->set_table(*table);
return ReadOnlyRoots(isolate).undefined_value();
@@ -64,7 +66,9 @@ RUNTIME_FUNCTION(Runtime_MapGrow) {
OrderedHashMap::EnsureGrowable(isolate, table);
if (!table_candidate.ToHandle(&table)) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kValueOutOfRange));
+ isolate,
+ NewRangeError(MessageTemplate::kCollectionGrowFailed,
+ isolate->factory()->NewStringFromAsciiChecked("Map")));
}
holder->set_table(*table);
return ReadOnlyRoots(isolate).undefined_value();
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 7088e4074e..54924e0f7b 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -83,13 +83,13 @@ RUNTIME_FUNCTION(Runtime_InstallBaselineCode) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
- DCHECK(sfi->HasBaselineData());
+ DCHECK(sfi->HasBaselineCode());
IsCompiledScope is_compiled_scope(*sfi, isolate);
DCHECK(!function->HasAvailableOptimizedCode());
DCHECK(!function->HasOptimizationMarker());
DCHECK(!function->has_feedback_vector());
JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
- Code baseline_code = sfi->baseline_data().baseline_code();
+ Code baseline_code = sfi->baseline_code(kAcquireLoad);
function->set_code(baseline_code);
return baseline_code;
}
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index a67a6f09c6..f9e60c64b3 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -54,8 +54,9 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
// Underlying function needs to have bytecode available.
DCHECK(function->shared().HasBytecodeArray());
- int size = function->shared().internal_formal_parameter_count() +
- function->shared().GetBytecodeArray(isolate).register_count();
+ int size =
+ function->shared().internal_formal_parameter_count_without_receiver() +
+ function->shared().GetBytecodeArray(isolate).register_count();
Handle<FixedArray> parameters_and_registers =
isolate->factory()->NewFixedArray(size);
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index f9dce4d271..d86fc23622 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -31,6 +31,12 @@
#include "src/strings/string-builder-inl.h"
#include "src/utils/ostreams.h"
+#if V8_ENABLE_WEBASSEMBLY
+// TODO(jkummerow): Drop this when the "SaveAndClearThreadInWasmFlag"
+// short-term mitigation is no longer needed.
+#include "src/trap-handler/trap-handler.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
@@ -418,6 +424,34 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromCode) {
return ReadOnlyRoots(isolate).undefined_value();
}
+namespace {
+
+#if V8_ENABLE_WEBASSEMBLY
+class SaveAndClearThreadInWasmFlag {
+ public:
+ SaveAndClearThreadInWasmFlag() {
+ if (trap_handler::IsTrapHandlerEnabled()) {
+ if (trap_handler::IsThreadInWasm()) {
+ thread_was_in_wasm_ = true;
+ trap_handler::ClearThreadInWasm();
+ }
+ }
+ }
+ ~SaveAndClearThreadInWasmFlag() {
+ if (thread_was_in_wasm_) {
+ trap_handler::SetThreadInWasm();
+ }
+ }
+
+ private:
+ bool thread_was_in_wasm_{false};
+};
+#else
+class SaveAndClearThreadInWasmFlag {};
+#endif // V8_ENABLE_WEBASSEMBLY
+
+} // namespace
+
RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -434,6 +468,14 @@ RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
CHECK(size <= kMaxRegularHeapObjectSize);
}
+#if V8_ENABLE_WEBASSEMBLY
+ // Short-term mitigation for crbug.com/1236668. When this is called from
+ // WasmGC code, clear the "thread in wasm" flag, which is important in case
+ // any GC needs to happen.
+ // TODO(jkummerow): Find a better fix, likely by replacing the global flag.
+ SaveAndClearThreadInWasmFlag clear_wasm_flag;
+#endif // V8_ENABLE_WEBASSEMBLY
+
// TODO(v8:9472): Until double-aligned allocation is fixed for new-space
// allocations, don't request it.
double_align = false;
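The SaveAndClearThreadInWasmFlag helper added above is the usual RAII save/clear/restore idiom: the constructor records and clears the flag, and the destructor restores it on every exit path, including early returns and exception unwinding. A minimal self-contained sketch of that pattern, using a hypothetical thread_local bool in place of V8's trap_handler accessors:

#include <cassert>

// Hypothetical stand-in for the real thread-in-wasm trap-handler flag.
thread_local bool g_thread_in_wasm = false;

class ScopedClearThreadInWasm {
 public:
  ScopedClearThreadInWasm() {
    if (g_thread_in_wasm) {
      was_set_ = true;
      g_thread_in_wasm = false;  // Cleared for the duration of the scope.
    }
  }
  ~ScopedClearThreadInWasm() {
    if (was_set_) g_thread_in_wasm = true;  // Restored on any exit path.
  }

 private:
  bool was_set_ = false;
};

void AllocateSomething() {
  ScopedClearThreadInWasm clear_flag;
  // ... allocation work that must not run with the flag set ...
}

int main() {
  g_thread_in_wasm = true;
  AllocateSomething();
  assert(g_thread_in_wasm);  // Back to the previous value.
}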
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index 52fadb8c8c..9adde80fd9 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -12,6 +12,18 @@
namespace v8 {
namespace internal {
+namespace {
+Handle<Script> GetEvalOrigin(Isolate* isolate, Script origin_script) {
+ DisallowGarbageCollection no_gc;
+ while (origin_script.has_eval_from_shared()) {
+ HeapObject maybe_script = origin_script.eval_from_shared().script();
+ CHECK(maybe_script.IsScript());
+ origin_script = Script::cast(maybe_script);
+ }
+ return handle(origin_script, isolate);
+}
+} // namespace
+
RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
HandleScope scope(isolate);
DCHECK_LE(2, args.length());
@@ -25,17 +37,11 @@ RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
import_assertions = args.at<Object>(2);
}
- Handle<Script> script(Script::cast(function->shared().script()), isolate);
-
- while (script->has_eval_from_shared()) {
- Object maybe_script = script->eval_from_shared().script();
- CHECK(maybe_script.IsScript());
- script = handle(Script::cast(maybe_script), isolate);
- }
-
+ Handle<Script> referrer_script =
+ GetEvalOrigin(isolate, Script::cast(function->shared().script()));
RETURN_RESULT_OR_FAILURE(isolate,
isolate->RunHostImportModuleDynamicallyCallback(
- script, specifier, import_assertions));
+ referrer_script, specifier, import_assertions));
}
RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
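The new GetEvalOrigin helper above simply walks the eval chain until it reaches a script that was not created by eval, and dynamic import then uses that outermost script as the referrer. A schematic version of the walk with plain C++ structs (not V8's Script/SharedFunctionInfo types):

// Each eval()-created script points back at the script that evaluated it.
struct Script {
  const Script* eval_from = nullptr;  // Null for a non-eval script.
};

// Follow eval_from links until reaching the outermost (non-eval) script.
const Script* GetEvalOrigin(const Script* script) {
  while (script->eval_from != nullptr) {
    script = script->eval_from;
  }
  return script;
}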
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 42bbb10d92..bec54bd8d4 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -49,22 +49,10 @@ MaybeHandle<Object> Runtime::GetObjectProperty(
if (!it.IsFound() && key->IsSymbol() &&
Symbol::cast(*key).is_private_name()) {
- Handle<Symbol> sym = Handle<Symbol>::cast(key);
- Handle<Object> name(sym->description(), isolate);
- DCHECK(name->IsString());
- Handle<String> name_string = Handle<String>::cast(name);
- if (sym->IsPrivateBrand()) {
- Handle<String> class_name = (name_string->length() == 0)
- ? isolate->factory()->anonymous_string()
- : name_string;
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kInvalidPrivateBrand,
- class_name, lookup_start_object),
- Object);
- }
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kInvalidPrivateMemberRead,
- name_string, lookup_start_object),
+ MessageTemplate message = Symbol::cast(*key).IsPrivateBrand()
+ ? MessageTemplate::kInvalidPrivateBrand
+ : MessageTemplate::kInvalidPrivateMemberRead;
+ THROW_NEW_ERROR(isolate, NewTypeError(message, key, lookup_start_object),
Object);
}
return result;
@@ -1424,7 +1412,9 @@ RUNTIME_FUNCTION(Runtime_AddPrivateBrand) {
if (it.IsFound()) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kVarRedeclaration, brand));
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidPrivateBrandReinitialization,
+ brand));
}
PropertyAttributes attributes =
@@ -1447,7 +1437,8 @@ RUNTIME_FUNCTION(Runtime_AddPrivateField) {
if (it.IsFound()) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kVarRedeclaration, key));
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidPrivateFieldReitialization, key));
}
CHECK(Object::AddDataProperty(&it, value, NONE, Just(kDontThrow),
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index f49689c292..8b65ffb7cc 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -401,7 +401,8 @@ Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
isolate->factory()->NewArgumentsObject(callee, argument_count);
// Allocate the elements if needed.
- int parameter_count = callee->shared().internal_formal_parameter_count();
+ int parameter_count =
+ callee->shared().internal_formal_parameter_count_without_receiver();
if (argument_count > 0) {
if (parameter_count > 0) {
int mapped_count = std::min(argument_count, parameter_count);
@@ -526,7 +527,8 @@ RUNTIME_FUNCTION(Runtime_NewRestParameter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0)
- int start_index = callee->shared().internal_formal_parameter_count();
+ int start_index =
+ callee->shared().internal_formal_parameter_count_without_receiver();
// This generic runtime function can also be used when the caller has been
// inlined, we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
diff --git a/deps/v8/src/runtime/runtime-test-wasm.cc b/deps/v8/src/runtime/runtime-test-wasm.cc
index 8425b1fa18..b33cbeae39 100644
--- a/deps/v8/src/runtime/runtime-test-wasm.cc
+++ b/deps/v8/src/runtime/runtime-test-wasm.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-wasm.h"
#include "src/base/memory.h"
#include "src/base/platform/mutex.h"
#include "src/execution/arguments-inl.h"
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 69b0f6241b..3b49e8a891 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/base/numbers/double.h"
#include "src/base/platform/mutex.h"
@@ -70,6 +71,18 @@ V8_WARN_UNUSED_RESULT Object ReturnFuzzSafe(Object value, Isolate* isolate) {
if (!args[index].IsBoolean()) return CrashUnlessFuzzing(isolate); \
bool name = args[index].IsTrue(isolate);
+bool IsAsmWasmFunction(Isolate* isolate, JSFunction function) {
+ DisallowGarbageCollection no_gc;
+#if V8_ENABLE_WEBASSEMBLY
+ // For simplicity we include invalid asm.js functions whose code hasn't yet
+ // been updated to CompileLazy but is still the InstantiateAsmJs builtin.
+ return function.shared().HasAsmWasmData() ||
+ function.code().builtin_id() == Builtin::kInstantiateAsmJs;
+#else
+ return false;
+#endif // V8_ENABLE_WEBASSEMBLY
+}
+
} // namespace
RUNTIME_FUNCTION(Runtime_ClearMegamorphicStubCache) {
@@ -242,11 +255,9 @@ bool CanOptimizeFunction(Handle<JSFunction> function, Isolate* isolate,
return CrashUnlessFuzzingReturnFalse(isolate);
}
-#if V8_ENABLE_WEBASSEMBLY
- if (function->shared().HasAsmWasmData()) {
+ if (IsAsmWasmFunction(isolate, *function)) {
return CrashUnlessFuzzingReturnFalse(isolate);
}
-#endif // V8_ENABLE_WEBASSEMBLY
if (FLAG_testing_d8_test_runner) {
PendingOptimizationTable::MarkedForOptimization(isolate, function);
@@ -362,12 +373,12 @@ RUNTIME_FUNCTION(Runtime_CompileBaseline) {
// First compile the bytecode, if we have to.
if (!is_compiled_scope.is_compiled() &&
- !Compiler::Compile(isolate, function, Compiler::KEEP_EXCEPTION,
+ !Compiler::Compile(isolate, function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
return CrashUnlessFuzzing(isolate);
}
- if (!Compiler::CompileBaseline(isolate, function, Compiler::KEEP_EXCEPTION,
+ if (!Compiler::CompileBaseline(isolate, function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
return CrashUnlessFuzzing(isolate);
}
@@ -424,9 +435,7 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
return CrashUnlessFuzzing(isolate);
}
-#if V8_ENABLE_WEBASSEMBLY
- if (function->shared().HasAsmWasmData()) return CrashUnlessFuzzing(isolate);
-#endif // V8_ENABLE_WEBASSEMBLY
+ if (IsAsmWasmFunction(isolate, *function)) return CrashUnlessFuzzing(isolate);
// Hold onto the bytecode array between marking and optimization to ensure
// it's not flushed.
@@ -569,7 +578,7 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1 || args.length() == 2);
+ DCHECK_EQ(args.length(), 1);
int status = 0;
if (FLAG_lite_mode || FLAG_jitless) {
// Both jitless and lite modes cannot optimize. Unit tests should handle
@@ -590,32 +599,8 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
if (function_object->IsUndefined()) return Smi::FromInt(status);
if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
-
status |= static_cast<int>(OptimizationStatus::kIsFunction);
- bool sync_with_compiler_thread = true;
- if (args.length() == 2) {
- CONVERT_ARG_HANDLE_CHECKED(Object, sync_object, 1);
- if (!sync_object->IsString()) return CrashUnlessFuzzing(isolate);
- Handle<String> sync = Handle<String>::cast(sync_object);
- if (sync->IsOneByteEqualTo(base::StaticCharVector("no sync"))) {
- sync_with_compiler_thread = false;
- } else if (sync->IsOneByteEqualTo(base::StaticCharVector("sync")) ||
- sync->length() == 0) {
- DCHECK(sync_with_compiler_thread);
- } else {
- return CrashUnlessFuzzing(isolate);
- }
- }
-
- if (isolate->concurrent_recompilation_enabled() &&
- sync_with_compiler_thread) {
- while (function->IsInOptimizationQueue()) {
- isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
- base::OS::Sleep(base::TimeDelta::FromMilliseconds(50));
- }
- }
-
if (function->IsMarkedForOptimization()) {
status |= static_cast<int>(OptimizationStatus::kMarkedForOptimization);
} else if (function->IsMarkedForConcurrentOptimization()) {
@@ -670,39 +655,32 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
return Smi::FromInt(status);
}
-RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
- DCHECK_EQ(0, args.length());
- CHECK(FLAG_block_concurrent_recompilation);
- CHECK(isolate->concurrent_recompilation_enabled());
- isolate->optimizing_compile_dispatcher()->Unblock();
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_DisableOptimizationFinalization) {
DCHECK_EQ(0, args.length());
- DCHECK(!FLAG_block_concurrent_recompilation);
- CHECK(isolate->concurrent_recompilation_enabled());
- isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
- isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
- isolate->optimizing_compile_dispatcher()->set_finalize(false);
+ if (isolate->concurrent_recompilation_enabled()) {
+ isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+ isolate->stack_guard()->ClearInstallCode();
+ isolate->optimizing_compile_dispatcher()->set_finalize(false);
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_WaitForBackgroundOptimization) {
DCHECK_EQ(0, args.length());
- DCHECK(!FLAG_block_concurrent_recompilation);
- CHECK(isolate->concurrent_recompilation_enabled());
- isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ if (isolate->concurrent_recompilation_enabled()) {
+ isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_FinalizeOptimization) {
DCHECK_EQ(0, args.length());
- DCHECK(!FLAG_block_concurrent_recompilation);
- CHECK(isolate->concurrent_recompilation_enabled());
- isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
- isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
- isolate->optimizing_compile_dispatcher()->set_finalize(true);
+ if (isolate->concurrent_recompilation_enabled()) {
+ isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+ isolate->optimizing_compile_dispatcher()->set_finalize(true);
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1117,6 +1095,11 @@ RUNTIME_FUNCTION(Runtime_PretenureAllocationSite) {
JSObject object = JSObject::cast(arg);
Heap* heap = object.GetHeap();
+ if (!heap->InYoungGeneration(object)) {
+ // Object is not in new space, thus there is no memento and nothing to do.
+ return ReturnFuzzSafe(ReadOnlyRoots(isolate).false_value(), isolate);
+ }
+
AllocationMemento memento =
heap->FindAllocationMemento<Heap::kForRuntime>(object.map(), object);
if (memento.is_null())
@@ -1422,10 +1405,8 @@ RUNTIME_FUNCTION(Runtime_NewRegExpWithBacktrackLimit) {
CONVERT_ARG_HANDLE_CHECKED(String, flags_string, 1);
CONVERT_UINT32_ARG_CHECKED(backtrack_limit, 2);
- bool success = false;
JSRegExp::Flags flags =
- JSRegExp::FlagsFromString(isolate, flags_string, &success);
- CHECK(success);
+ JSRegExp::FlagsFromString(isolate, flags_string).value();
RETURN_RESULT_OR_FAILURE(
isolate, JSRegExp::New(isolate, pattern, flags, backtrack_limit));
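The last hunk above reflects JSRegExp::FlagsFromString now returning an optional value instead of reporting success through an out-parameter, so the caller simply asks for .value(). A self-contained sketch of that shape using std::optional; the flag letters are the standard JavaScript ones, while the bit values and duplicate handling here are illustrative rather than V8's exact behavior:

#include <cstdint>
#include <optional>
#include <string_view>

// Illustrative flag bits; V8 defines the real ones in RegExpFlags/JSRegExp.
enum RegExpFlag : uint32_t {
  kGlobal = 1 << 0,
  kIgnoreCase = 1 << 1,
  kMultiline = 1 << 2,
  kDotAll = 1 << 3,
  kUnicode = 1 << 4,
  kSticky = 1 << 5,
  kHasIndices = 1 << 6,
};

// Returns std::nullopt on an unknown or duplicated flag letter.
std::optional<uint32_t> FlagsFromString(std::string_view flags) {
  uint32_t result = 0;
  for (char c : flags) {
    uint32_t bit = 0;
    switch (c) {
      case 'g': bit = kGlobal; break;
      case 'i': bit = kIgnoreCase; break;
      case 'm': bit = kMultiline; break;
      case 's': bit = kDotAll; break;
      case 'u': bit = kUnicode; break;
      case 'y': bit = kSticky; break;
      case 'd': bit = kHasIndices; break;
      default: return std::nullopt;
    }
    if (result & bit) return std::nullopt;  // Duplicate flag.
    result |= bit;
  }
  return result;
}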
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 5d0fc35944..ca3a50ee76 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -94,7 +94,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, array, 0);
DCHECK(!array->WasDetached());
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
if (FLAG_multi_mapped_mock_allocator) {
// Sorting is meaningless with the mock allocator, and std::sort
// might crash (because aliasing elements violate its assumptions).
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 47f184a3a0..3bcd41dfcb 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -203,7 +203,9 @@ bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
case Runtime::kArrayBufferDetach:
case Runtime::kDeoptimizeFunction:
case Runtime::kDeoptimizeNow:
+ case Runtime::kDisableOptimizationFinalization:
case Runtime::kEnableCodeLoggingForTesting:
+ case Runtime::kFinalizeOptimization:
case Runtime::kGetUndetectable:
case Runtime::kNeverOptimizeFunction:
case Runtime::kOptimizeFunctionOnNextCall:
@@ -212,6 +214,7 @@ bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
case Runtime::kPretenureAllocationSite:
case Runtime::kSetAllocationTimeout:
case Runtime::kSimulateNewspaceFull:
+ case Runtime::kWaitForBackgroundOptimization:
return true;
// Runtime functions only permitted for non-differential fuzzers.
// This list may contain functions performing extra checks or returning
@@ -221,9 +224,9 @@ bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
case Runtime::kIsBeingInterpreted:
case Runtime::kVerifyType:
return !FLAG_allow_natives_for_differential_fuzzing;
- case Runtime::kCompileBaseline:
case Runtime::kBaselineOsr:
- return FLAG_sparkplug;
+ case Runtime::kCompileBaseline:
+ return ENABLE_SPARKPLUG;
default:
return false;
}
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 045ffb3641..fed9c01416 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -7,7 +7,7 @@
#include <memory>
-#include "include/v8.h"
+#include "include/v8-maybe.h"
#include "src/base/bit-field.h"
#include "src/base/platform/time.h"
#include "src/common/globals.h"
@@ -488,7 +488,7 @@ namespace internal {
F(FinalizeOptimization, 0, 1) \
F(GetCallable, 0, 1) \
F(GetInitializerFunction, 1, 1) \
- F(GetOptimizationStatus, -1, 1) \
+ F(GetOptimizationStatus, 1, 1) \
F(GetUndetectable, 0, 1) \
F(GlobalPrint, 1, 1) \
F(HasDictionaryElements, 1, 1) \
@@ -558,7 +558,6 @@ namespace internal {
F(TraceExit, 1, 1) \
F(TurbofanStaticAssert, 1, 1) \
F(TypedArraySpeciesProtector, 0, 1) \
- F(UnblockConcurrentRecompilation, 0, 1) \
F(WaitForBackgroundOptimization, 0, 1) \
I(DeoptimizeNow, 0, 1)
diff --git a/deps/v8/src/snapshot/context-deserializer.cc b/deps/v8/src/snapshot/context-deserializer.cc
index ad109bacca..fb643ba014 100644
--- a/deps/v8/src/snapshot/context-deserializer.cc
+++ b/deps/v8/src/snapshot/context-deserializer.cc
@@ -61,7 +61,6 @@ void ContextDeserializer::SetupOffHeapArrayBufferBackingStores() {
for (Handle<JSArrayBuffer> buffer : new_off_heap_array_buffers()) {
uint32_t store_index = buffer->GetBackingStoreRefForDeserialization();
auto bs = backing_store(store_index);
- buffer->AllocateExternalPointerEntries(isolate());
// TODO(v8:11111): Support RAB / GSAB.
CHECK(!buffer->is_resizable());
SharedFlag shared =
diff --git a/deps/v8/src/snapshot/context-serializer.cc b/deps/v8/src/snapshot/context-serializer.cc
index 7a02a50caa..96d9d5f03e 100644
--- a/deps/v8/src/snapshot/context-serializer.cc
+++ b/deps/v8/src/snapshot/context-serializer.cc
@@ -177,8 +177,8 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
closure->ResetIfCodeFlushed();
if (closure->is_compiled()) {
- if (closure->shared().HasBaselineData()) {
- closure->shared().flush_baseline_data();
+ if (closure->shared().HasBaselineCode()) {
+ closure->shared().FlushBaselineCode();
}
closure->set_code(closure->shared().GetCode(), kReleaseStore);
}
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index fab2f80355..9f32faf67a 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -482,7 +482,6 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
// a numbered reference to an already deserialized backing store.
backing_store = backing_stores_[store_index]->buffer_start();
}
- data_view->AllocateExternalPointerEntries(main_thread_isolate());
data_view->set_data_pointer(
main_thread_isolate(),
reinterpret_cast<uint8_t*>(backing_store) + data_view->byte_offset());
@@ -491,7 +490,6 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
// Fixup typed array pointers.
if (typed_array->is_on_heap()) {
Address raw_external_pointer = typed_array->external_pointer_raw();
- typed_array->AllocateExternalPointerEntries(main_thread_isolate());
typed_array->SetOnHeapDataPtr(
main_thread_isolate(), HeapObject::cast(typed_array->base_pointer()),
raw_external_pointer);
@@ -503,7 +501,6 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
auto start = backing_store
? reinterpret_cast<byte*>(backing_store->buffer_start())
: nullptr;
- typed_array->AllocateExternalPointerEntries(main_thread_isolate());
typed_array->SetOffHeapDataPtr(main_thread_isolate(), start,
typed_array->byte_offset());
}
@@ -513,7 +510,6 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
if (buffer->GetBackingStoreRefForDeserialization() != kNullRefSentinel) {
new_off_heap_array_buffers_.push_back(buffer);
} else {
- buffer->AllocateExternalPointerEntries(main_thread_isolate());
buffer->set_backing_store(main_thread_isolate(), nullptr);
}
} else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.cc b/deps/v8/src/snapshot/embedded/embedded-data.cc
index 166e41d324..188ed6e879 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-data.cc
@@ -218,7 +218,7 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_S390) || \
- defined(V8_TARGET_ARCH_RISCV64)
+ defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
// On these platforms we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. This fixes up the
// relative jumps to the right offsets in the snapshot.
@@ -246,7 +246,7 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
// indirection through the root register.
CHECK(on_heap_it.done());
CHECK(off_heap_it.done());
-#endif // defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64)
+#endif
}
}
diff --git a/deps/v8/src/snapshot/embedded/embedded-empty.cc b/deps/v8/src/snapshot/embedded/embedded-empty.cc
index c32b459d9d..e5355215f2 100644
--- a/deps/v8/src/snapshot/embedded/embedded-empty.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-empty.cc
@@ -17,15 +17,3 @@ const uint8_t* v8_Default_embedded_blob_code_ = nullptr;
uint32_t v8_Default_embedded_blob_code_size_ = 0;
const uint8_t* v8_Default_embedded_blob_data_ = nullptr;
uint32_t v8_Default_embedded_blob_data_size_ = 0;
-
-#ifdef V8_MULTI_SNAPSHOTS
-extern "C" const uint8_t* v8_Trusted_embedded_blob_code_;
-extern "C" uint32_t v8_Trusted_embedded_blob_code_size_;
-extern "C" const uint8_t* v8_Trusted_embedded_blob_data_;
-extern "C" uint32_t v8_Trusted_embedded_blob_data_size_;
-
-const uint8_t* v8_Trusted_embedded_blob_code_ = nullptr;
-uint32_t v8_Trusted_embedded_blob_code_size_ = 0;
-const uint8_t* v8_Trusted_embedded_blob_data_ = nullptr;
-uint32_t v8_Trusted_embedded_blob_data_size_ = 0;
-#endif
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
index 41cd9dbca0..e858da90b5 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
@@ -65,8 +65,14 @@ void PlatformEmbeddedFileWriterAIX::DeclareSymbolGlobal(const char* name) {
}
void PlatformEmbeddedFileWriterAIX::AlignToCodeAlignment() {
+#if V8_TARGET_ARCH_X64
+ // On x64, use 64-byte code alignment to allow 64-byte loop header alignment.
+ STATIC_ASSERT((1 << 6) >= kCodeAlignment);
+ fprintf(fp_, ".align 6\n");
+#else
STATIC_ASSERT((1 << 5) >= kCodeAlignment);
fprintf(fp_, ".align 5\n");
+#endif
}
void PlatformEmbeddedFileWriterAIX::AlignToDataAlignment() {
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
index e2d5dcb41c..641d3638f3 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
@@ -74,8 +74,14 @@ void PlatformEmbeddedFileWriterGeneric::DeclareSymbolGlobal(const char* name) {
}
void PlatformEmbeddedFileWriterGeneric::AlignToCodeAlignment() {
+#if V8_TARGET_ARCH_X64
+ // On x64, use 64-byte code alignment to allow 64-byte loop header alignment.
+ STATIC_ASSERT(64 >= kCodeAlignment);
+ fprintf(fp_, ".balign 64\n");
+#else
STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".balign 32\n");
+#endif
}
void PlatformEmbeddedFileWriterGeneric::AlignToDataAlignment() {
@@ -152,8 +158,9 @@ int PlatformEmbeddedFileWriterGeneric::IndentedDataDirective(
DataDirective PlatformEmbeddedFileWriterGeneric::ByteChunkDataDirective()
const {
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
- // MIPS uses a fixed 4 byte instruction set, using .long
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_LOONG64)
+ // MIPS and LOONG64 use a fixed 4-byte instruction set, using .long
// to prevent any unnecessary padding.
return kLong;
#else
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
index 5fa12ec6ea..cfe9bbcde1 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
@@ -56,12 +56,18 @@ void PlatformEmbeddedFileWriterMac::DeclareSymbolGlobal(const char* name) {
// prevents something along the compilation chain from messing with the
// embedded blob. Using .global here causes embedded blob hash verification
// failures at runtime.
- STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".private_extern _%s\n", name);
}
void PlatformEmbeddedFileWriterMac::AlignToCodeAlignment() {
+#if V8_TARGET_ARCH_X64
+ // On x64, use 64-byte code alignment to allow 64-byte loop header alignment.
+ STATIC_ASSERT(64 >= kCodeAlignment);
+ fprintf(fp_, ".balign 64\n");
+#else
+ STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".balign 32\n");
+#endif
}
void PlatformEmbeddedFileWriterMac::AlignToDataAlignment() {
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
index 7b4eadd98a..83b85c8df9 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
@@ -637,7 +637,14 @@ void PlatformEmbeddedFileWriterWin::DeclareSymbolGlobal(const char* name) {
}
void PlatformEmbeddedFileWriterWin::AlignToCodeAlignment() {
+#if V8_TARGET_ARCH_X64
+ // On x64, use 64-byte code alignment to allow 64-byte loop header alignment.
+ STATIC_ASSERT(64 >= kCodeAlignment);
+ fprintf(fp_, ".balign 64\n");
+#else
+ STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".balign 32\n");
+#endif
}
void PlatformEmbeddedFileWriterWin::AlignToDataAlignment() {
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index 4e5b43b23f..86b0304fb0 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -9,6 +9,7 @@
#include <iomanip>
#include "include/libplatform/libplatform.h"
+#include "include/v8-initialization.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/base/sanitizer/msan.h"
@@ -239,6 +240,11 @@ int main(int argc, char** argv) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform.get());
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!v8::V8::InitializeVirtualMemoryCage()) {
+ FATAL("Could not initialize the virtual memory cage");
+ }
+#endif
v8::V8::Initialize();
{
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 68fb1a01a6..47221dd952 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -121,12 +121,14 @@ void Serializer::SerializeObject(Handle<HeapObject> obj) {
// indirection and serialize the actual string directly.
if (obj->IsThinString(isolate())) {
obj = handle(ThinString::cast(*obj).actual(isolate()), isolate());
- } else if (obj->IsBaselineData()) {
- // For now just serialize the BytecodeArray instead of baseline data.
- // TODO(v8:11429,pthier): Handle BaselineData in cases we want to serialize
- // Baseline code.
- obj = handle(Handle<BaselineData>::cast(obj)->GetActiveBytecodeArray(),
- isolate());
+ } else if (obj->IsCodeT()) {
+ Code code = FromCodeT(CodeT::cast(*obj));
+ if (code.kind() == CodeKind::BASELINE) {
+ // For now just serialize the BytecodeArray instead of baseline code.
+ // TODO(v8:11429,pthier): Handle Baseline code in cases we want to
+ // serialize it.
+ obj = handle(code.bytecode_or_interpreter_data(isolate()), isolate());
+ }
}
SerializeObjectImpl(obj);
}
@@ -521,10 +523,6 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
ArrayBufferExtension* extension = buffer->extension();
// The embedder-allocated backing store only exists for the off-heap case.
-#ifdef V8_HEAP_SANDBOX
- uint32_t external_pointer_entry =
- buffer->GetBackingStoreRefForDeserialization();
-#endif
if (backing_store != nullptr) {
uint32_t ref = SerializeBackingStore(backing_store, byte_length);
buffer->SetBackingStoreRefForSerialization(ref);
@@ -538,11 +536,7 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
SerializeObject();
-#ifdef V8_HEAP_SANDBOX
- buffer->SetBackingStoreRefForSerialization(external_pointer_entry);
-#else
buffer->set_backing_store(isolate(), backing_store);
-#endif
buffer->set_extension(extension);
}
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 2f16eee6d5..f176faa607 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -5,7 +5,7 @@
#ifndef V8_SNAPSHOT_SNAPSHOT_H_
#define V8_SNAPSHOT_SNAPSHOT_H_
-#include "include/v8.h" // For StartupData.
+#include "include/v8-snapshot.h" // For StartupData.
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
diff --git a/deps/v8/src/tasks/OWNERS b/deps/v8/src/tasks/OWNERS
index f7a22ea908..69a86ca984 100644
--- a/deps/v8/src/tasks/OWNERS
+++ b/deps/v8/src/tasks/OWNERS
@@ -1,4 +1,3 @@
ahaas@chromium.org
clemensb@chromium.org
mlippautz@chromium.org
-rmcilroy@chromium.org
diff --git a/deps/v8/src/third_party/vtune/BUILD.gn b/deps/v8/src/third_party/vtune/BUILD.gn
index e8582dbb79..d763da1064 100644
--- a/deps/v8/src/third_party/vtune/BUILD.gn
+++ b/deps/v8/src/third_party/vtune/BUILD.gn
@@ -22,6 +22,11 @@ static_library("v8_vtune") {
"vtune-jit.h",
]
configs += [ ":vtune_ittapi" ]
+
+ # TODO(delphick): Consider changing these to be v8_source_sets
+ if (!build_with_chromium && is_clang) {
+ configs -= [ "//build/config/clang:find_bad_constructs" ]
+ }
deps = [ "../../..:v8" ]
}
diff --git a/deps/v8/src/third_party/vtune/v8-vtune.h b/deps/v8/src/third_party/vtune/v8-vtune.h
index 34da9cb5bf..2ef1bf8cc4 100644
--- a/deps/v8/src/third_party/vtune/v8-vtune.h
+++ b/deps/v8/src/third_party/vtune/v8-vtune.h
@@ -58,7 +58,7 @@
#ifndef V8_VTUNE_H_
#define V8_VTUNE_H_
-#include "../../../include/v8.h"
+#include "../../../include/v8-callbacks.h"
namespace vTune {
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.cc b/deps/v8/src/third_party/vtune/vtune-jit.cc
index 08fbfbfe39..7b9d338c3e 100644
--- a/deps/v8/src/third_party/vtune/vtune-jit.cc
+++ b/deps/v8/src/third_party/vtune/vtune-jit.cc
@@ -56,6 +56,8 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include "vtune-jit.h"
+
#include <stdlib.h>
#include <string.h>
@@ -65,8 +67,12 @@
#include <unordered_map>
#include <vector>
+#include "../../../include/v8-callbacks.h"
+#include "../../../include/v8-initialization.h"
+#include "../../../include/v8-local-handle.h"
+#include "../../../include/v8-primitive.h"
+#include "../../../include/v8-script.h"
#include "v8-vtune.h"
-#include "vtune-jit.h"
namespace vTune {
namespace internal {
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.h b/deps/v8/src/third_party/vtune/vtune-jit.h
index 4e5af45c61..148c82434f 100644
--- a/deps/v8/src/third_party/vtune/vtune-jit.h
+++ b/deps/v8/src/third_party/vtune/vtune-jit.h
@@ -58,11 +58,14 @@
#ifndef VTUNE_VTUNE_JIT_H_
#define VTUNE_VTUNE_JIT_H_
-#include "../../../include/v8.h"
#include "third_party/ittapi/include/jitprofiling.h"
#define VTUNERUNNING (iJIT_IsProfilingActive() == iJIT_SAMPLING_ON)
+namespace v8 {
+struct JitCodeEvent;
+}
+
namespace vTune {
namespace internal {
using namespace v8;
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index b814f0cc63..dceb660c21 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -579,7 +579,7 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
.Position(signature.parameter_names[signature.implicit_count]->pos);
}
- csa_ccfile() << " TNode<Word32T> argc = UncheckedParameter<Word32T>("
+ csa_ccfile() << " TNode<Word32T> argc = UncheckedParameter<Word32T>("
<< "Descriptor::kJSActualArgumentsCount);\n";
csa_ccfile() << " TNode<IntPtrT> "
"arguments_length(ChangeInt32ToIntPtr(UncheckedCast<"
@@ -588,13 +588,17 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
"UncheckedCast<RawPtrT>(LoadFramePointer());\n";
csa_ccfile() << " TorqueStructArguments "
"torque_arguments(GetFrameArguments(arguments_frame, "
- "arguments_length));\n";
+ "arguments_length, FrameArgumentsArgcType::"
+ << (kJSArgcIncludesReceiver ? "kCountIncludesReceiver"
+ : "kCountExcludesReceiver")
+ << "));\n";
csa_ccfile()
<< " CodeStubArguments arguments(this, torque_arguments);\n";
parameters.Push("torque_arguments.frame");
parameters.Push("torque_arguments.base");
parameters.Push("torque_arguments.length");
+ parameters.Push("torque_arguments.actual_count");
const Type* arguments_type = TypeOracle::GetArgumentsType();
StackRange range = parameter_types.PushMany(LowerType(arguments_type));
parameter_bindings.Add(*signature.arguments_variable,
@@ -625,7 +629,7 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
? "arguments.GetReceiver()"
: "UncheckedParameter<Object>(Descriptor::kReceiver)")
<< ";\n";
- csa_ccfile() << "USE(" << generated_name << ");\n";
+ csa_ccfile() << " USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetJSAnyType()};
} else if (param_name == "newTarget") {
csa_ccfile() << " TNode<Object> " << generated_name
@@ -3521,7 +3525,7 @@ void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
// count.
int parameter_count =
static_cast<int>(builtin->signature().ExplicitCount());
- builtin_definitions << ", " << parameter_count;
+ builtin_definitions << ", " << JSParameterCount(parameter_count);
// And the receiver is explicitly declared.
builtin_definitions << ", kReceiver";
for (size_t i = builtin->signature().implicit_count;
@@ -3855,11 +3859,13 @@ namespace {
class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
public:
ClassFieldOffsetGenerator(std::ostream& header, std::ostream& inline_header,
- const ClassType* type, std::string gen_name)
+ const ClassType* type, std::string gen_name,
+ const ClassType* parent)
: FieldOffsetsGenerator(type),
hdr_(header),
inl_(inline_header),
- previous_field_end_("P::kHeaderSize"),
+ previous_field_end_((parent && parent->IsShape()) ? "P::kSize"
+ : "P::kHeaderSize"),
gen_name_(gen_name) {}
void WriteField(const Field& f, const std::string& size_string) override {
std::string field = "k" + CamelifyString(f.name_and_type.name) + "Offset";
@@ -3981,7 +3987,7 @@ base::Optional<std::vector<Field>> GetOrderedUniqueIndexFields(
void CppClassGenerator::GenerateClass() {
// Is<name>_NonInline(HeapObject)
- {
+ if (!type_->IsShape()) {
cpp::Function f("Is"s + name_ + "_NonInline");
f.SetDescription("Alias for HeapObject::Is"s + name_ +
"() that avoids inlining.");
@@ -4046,7 +4052,8 @@ void CppClassGenerator::GenerateClass() {
}
hdr_ << "\n";
- ClassFieldOffsetGenerator g(hdr_, inl_, type_, gen_name_);
+ ClassFieldOffsetGenerator g(hdr_, inl_, type_, gen_name_,
+ type_->GetSuperClass());
for (auto f : type_->fields()) {
CurrentSourcePosition::Scope scope(f.pos);
g.RecordOffsetFor(f);
@@ -4174,6 +4181,15 @@ void CppClassGenerator::GenerateClassCasts() {
}
void CppClassGenerator::GenerateClassConstructors() {
+ const ClassType* typecheck_type = type_;
+ while (typecheck_type->IsShape()) {
+ typecheck_type = typecheck_type->GetSuperClass();
+
+ // Shapes have already been checked earlier to inherit from JSObject, so we
+ // should have found an appropriate type.
+ DCHECK(typecheck_type);
+ }
+
hdr_ << " public:\n";
hdr_ << " template <class DAlias = D>\n";
hdr_ << " constexpr " << gen_name_ << "() : P() {\n";
@@ -4194,7 +4210,8 @@ void CppClassGenerator::GenerateClassConstructors() {
inl_ << "template<class D, class P>\n";
inl_ << "inline " << gen_name_T_ << "::" << gen_name_ << "(Address ptr)\n";
inl_ << " : P(ptr) {\n";
- inl_ << " SLOW_DCHECK(Is" << name_ << "_NonInline(*this));\n";
+ inl_ << " SLOW_DCHECK(Is" << typecheck_type->name()
+ << "_NonInline(*this));\n";
inl_ << "}\n";
inl_ << "template<class D, class P>\n";
@@ -4204,7 +4221,7 @@ void CppClassGenerator::GenerateClassConstructors() {
inl_ << " SLOW_DCHECK("
<< "(allow_smi == HeapObject::AllowInlineSmiStorage::kAllowBeingASmi"
" && this->IsSmi()) || Is"
- << name_ << "_NonInline(*this));\n";
+ << typecheck_type->name() << "_NonInline(*this));\n";
inl_ << "}\n";
}
@@ -4603,9 +4620,11 @@ void ImplementationVisitor::GenerateClassDefinitions(
for (const ClassType* type : TypeOracle::GetClasses()) {
auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
std::ostream& header = streams.class_definition_headerfile;
- header << "class " << type->GetGeneratedTNodeTypeName() << ";\n";
- forward_declarations << "class " << type->GetGeneratedTNodeTypeName()
- << ";\n";
+ std::string name = type->GenerateCppClassDefinitions()
+ ? type->name()
+ : type->GetGeneratedTNodeTypeName();
+ header << "class " << name << ";\n";
+ forward_declarations << "class " << name << ";\n";
}
for (const ClassType* type : TypeOracle::GetClasses()) {
diff --git a/deps/v8/src/utils/address-map.h b/deps/v8/src/utils/address-map.h
index 6a9c513bc6..0a6c749b39 100644
--- a/deps/v8/src/utils/address-map.h
+++ b/deps/v8/src/utils/address-map.h
@@ -5,7 +5,6 @@
#ifndef V8_UTILS_ADDRESS_MAP_H_
#define V8_UTILS_ADDRESS_MAP_H_
-#include "include/v8.h"
#include "src/base/hashmap.h"
#include "src/common/assert-scope.h"
#include "src/objects/heap-object.h"
diff --git a/deps/v8/src/utils/allocation.cc b/deps/v8/src/utils/allocation.cc
index 9cdd53fa6d..6f6225797a 100644
--- a/deps/v8/src/utils/allocation.cc
+++ b/deps/v8/src/utils/allocation.cc
@@ -17,6 +17,7 @@
#include "src/base/vector.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
+#include "src/init/vm-cage.h"
#include "src/utils/memcopy.h"
#if V8_LIBC_BIONIC
@@ -53,6 +54,7 @@ class PageAllocatorInitializer {
page_allocator_ = default_page_allocator.get();
}
#if defined(LEAK_SANITIZER)
+ static_assert(!V8_VIRTUAL_MEMORY_CAGE_BOOL, "Not currently supported");
static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
page_allocator_);
page_allocator_ = lsan_allocator.get();
@@ -61,16 +63,25 @@ class PageAllocatorInitializer {
PageAllocator* page_allocator() const { return page_allocator_; }
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ PageAllocator* data_cage_page_allocator() const {
+ return data_cage_page_allocator_;
+ }
+#endif
+
void SetPageAllocatorForTesting(PageAllocator* allocator) {
page_allocator_ = allocator;
}
private:
PageAllocator* page_allocator_;
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ PageAllocator* data_cage_page_allocator_;
+#endif
};
DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
- GetPageTableInitializer)
+ GetPageAllocatorInitializer)
// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
@@ -79,14 +90,29 @@ const int kAllocationTries = 2;
} // namespace
v8::PageAllocator* GetPlatformPageAllocator() {
- DCHECK_NOT_NULL(GetPageTableInitializer()->page_allocator());
- return GetPageTableInitializer()->page_allocator();
+ DCHECK_NOT_NULL(GetPageAllocatorInitializer()->page_allocator());
+ return GetPageAllocatorInitializer()->page_allocator();
}
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+// TODO(chromium:1218005) once we disallow disabling the cage, name this e.g.
+// "GetPlatformDataPageAllocator", and set it to the PlatformPageAllocator when
+// V8_VIRTUAL_MEMORY_CAGE is not defined. Then use that allocator whenever
+// allocating ArrayBuffer backing stores inside v8.
+v8::PageAllocator* GetPlatformDataCagePageAllocator() {
+ if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
+ return GetPlatformPageAllocator();
+ } else {
+ CHECK(GetProcessWideVirtualMemoryCage()->is_initialized());
+ return GetProcessWideVirtualMemoryCage()->GetDataCagePageAllocator();
+ }
+}
+#endif
+
v8::PageAllocator* SetPlatformPageAllocatorForTesting(
v8::PageAllocator* new_page_allocator) {
v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
- GetPageTableInitializer()->SetPageAllocatorForTesting(new_page_allocator);
+ GetPageAllocatorInitializer()->SetPageAllocatorForTesting(new_page_allocator);
return old_page_allocator;
}
@@ -323,7 +349,8 @@ inline Address VirtualMemoryCageStart(
}
} // namespace
-bool VirtualMemoryCage::InitReservation(const ReservationParams& params) {
+bool VirtualMemoryCage::InitReservation(
+ const ReservationParams& params, base::AddressRegion existing_reservation) {
DCHECK(!reservation_.IsReserved());
const size_t allocate_page_size = params.page_allocator->AllocatePageSize();
@@ -337,7 +364,16 @@ bool VirtualMemoryCage::InitReservation(const ReservationParams& params) {
RoundUp(params.base_alignment, allocate_page_size)) -
RoundUp(params.base_bias_size, allocate_page_size);
- if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
+ if (!existing_reservation.is_empty()) {
+ CHECK_EQ(existing_reservation.size(), params.reservation_size);
+ CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
+ IsAligned(existing_reservation.begin(), params.base_alignment));
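+ // Wrap the caller-provided region without taking ownership; Free() must
+ // leave releasing it to the owner.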
+ reservation_ =
+ VirtualMemory(params.page_allocator, existing_reservation.begin(),
+ existing_reservation.size());
+ base_ = reservation_.address() + params.base_bias_size;
+ reservation_is_owned_ = false;
+ } else if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
// When the base doesn't need to be aligned, the virtual memory reservation
// fails only due to OOM.
VirtualMemory reservation(params.page_allocator, params.reservation_size,
@@ -426,7 +462,13 @@ void VirtualMemoryCage::Free() {
if (IsReserved()) {
base_ = kNullAddress;
page_allocator_.reset();
- reservation_.Free();
+ if (reservation_is_owned_) {
+ reservation_.Free();
+ } else {
+ // Reservation is owned by the Platform.
+ DCHECK(V8_VIRTUAL_MEMORY_CAGE_BOOL);
+ reservation_.Reset();
+ }
}
}
diff --git a/deps/v8/src/utils/allocation.h b/deps/v8/src/utils/allocation.h
index 1d161b7e24..93499cc5e1 100644
--- a/deps/v8/src/utils/allocation.h
+++ b/deps/v8/src/utils/allocation.h
@@ -100,6 +100,12 @@ V8_EXPORT_PRIVATE void AlignedFree(void* ptr);
// Returns platform page allocator instance. Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+// Returns the platform data cage page allocator instance. Guaranteed to be a
+// valid pointer.
+V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformDataCagePageAllocator();
+#endif
+
// Sets the given page allocator as the platform page allocator and returns
// the current one. This function *must* be used only for testing purposes.
// It is not thread-safe and the testing infrastructure should ensure that
@@ -310,6 +316,9 @@ class VirtualMemory final {
// and the base bias size must be AllocatePageSize-aligned.
// - The base alignment may be kAnyBaseAlignment to denote any alignment is
// acceptable. In this case the base bias size does not need to be aligned.
+//
+// TODO(chromium:1218005) can we either combine this class and
+// v8::VirtualMemoryCage in v8-platform.h or rename one of the two?
class VirtualMemoryCage {
public:
VirtualMemoryCage();
@@ -351,13 +360,23 @@ class VirtualMemoryCage {
// A number of attempts is made to try to reserve a region that satisfies the
// constraints in params, but this may fail. The base address may be different
// than the one requested.
- bool InitReservation(const ReservationParams& params);
+ // If an existing reservation is provided, it will be used for this cage
+ // instead. The caller retains ownership of the reservation and is responsible
+ // for keeping the memory reserved during the lifetime of this object.
+ bool InitReservation(
+ const ReservationParams& params,
+ base::AddressRegion existing_reservation = base::AddressRegion());
void Free();
protected:
Address base_ = kNullAddress;
std::unique_ptr<base::BoundedPageAllocator> page_allocator_;
+ // Whether this cage owns the virtual memory reservation and thus should
+ // release it upon destruction. TODO(chromium:1218005) this is only needed
+ // when V8_VIRTUAL_MEMORY_CAGE is enabled. Maybe we can remove this again e.g.
+ // by merging this class and v8::VirtualMemoryCage in v8-platform.h.
+ bool reservation_is_owned_ = true;
VirtualMemory reservation_;
};
diff --git a/deps/v8/src/utils/v8dll-main.cc b/deps/v8/src/utils/v8dll-main.cc
index 6b484cfc8e..9bdd97f365 100644
--- a/deps/v8/src/utils/v8dll-main.cc
+++ b/deps/v8/src/utils/v8dll-main.cc
@@ -5,7 +5,7 @@
// The GYP based build ends up defining USING_V8_SHARED when compiling this
// file.
#undef USING_V8_SHARED
-#include "include/v8.h"
+#include "include/v8config.h"
#if V8_OS_WIN
#include "src/base/win32-headers.h"
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 6e2bacc043..211cf82398 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -4262,14 +4262,34 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ if (kind == kF32) {
+ FloatRegister src_f = liftoff::GetFloatRegister(src);
+ VFPCompareAndSetFlags(src_f, src_f);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ VFPCompareAndSetFlags(src, src);
+ }
+
+ // Store a non-zero value if src is NaN.
+ str(dst, MemOperand(dst), ne); // x != x iff isnan(x)
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ QwNeonRegister src_q = liftoff::GetSimd128Register(src);
+ QwNeonRegister tmp_q = liftoff::GetSimd128Register(tmp_s128);
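+ // Add the lanes pairwise so that any NaN lane propagates through the
+ // additions into the scalar value checked by emit_set_if_nan below.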
+ if (lane_kind == kF32) {
+ vpadd(tmp_q.low(), src_q.low(), src_q.high());
+ LowDwVfpRegister tmp_d =
+ LowDwVfpRegister::from_code(tmp_s128.low_fp().code());
+ vadd(tmp_d.low(), tmp_d.low(), tmp_d.high());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ vadd(tmp_q.low(), src_q.low(), src_q.high());
+ }
+ emit_set_if_nan(dst, tmp_q.low(), lane_kind);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index a52370f293..1d29ce72bc 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -1173,12 +1173,7 @@ void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- UseScratchRegisterScope temps(this);
- VRegister scratch = temps.AcquireV(kFormat8B);
- Fmov(scratch.S(), src.W());
- Cnt(scratch, scratch);
- Addv(scratch.B(), scratch);
- Fmov(dst.W(), scratch.S());
+ PopcntHelper(dst.W(), src.W());
return true;
}
@@ -1193,12 +1188,7 @@ void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- UseScratchRegisterScope temps(this);
- VRegister scratch = temps.AcquireV(kFormat8B);
- Fmov(scratch.D(), src.gp().X());
- Cnt(scratch, scratch);
- Addv(scratch.B(), scratch);
- Fmov(dst.gp().X(), scratch.D());
+ PopcntHelper(dst.gp().X(), src.gp().X());
return true;
}
@@ -1717,13 +1707,13 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
UseScratchRegisterScope temps(this);
MemOperand src_op{
liftoff::GetEffectiveAddress(this, &temps, addr, offset_reg, offset_imm)};
- *protected_load_pc = pc_offset();
MachineType mem_type = type.mem_type();
if (dst != src) {
Mov(dst.fp().Q(), src.fp().Q());
}
+ *protected_load_pc = pc_offset();
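+ // Record the protected instruction only now, after the unrelated register
+ // move, so the trap handler PC points at the actual load below.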
if (mem_type == MachineType::Int8()) {
ld1(dst.fp().B(), laneidx, src_op);
} else if (mem_type == MachineType::Int16()) {
@@ -3259,14 +3249,35 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ Label not_nan;
+ if (kind == kF32) {
+ Fcmp(src.S(), src.S());
+ B(eq, &not_nan); // x != x iff isnan(x)
+ // If it's a NaN, it must be non-zero, so store that as the set value.
+ Str(src.S(), MemOperand(dst));
+ } else {
+ DCHECK_EQ(kind, kF64);
+ Fcmp(src.D(), src.D());
+ B(eq, &not_nan); // x != x iff isnan(x)
+ // Double-precision NaNs must be non-zero in the most-significant 32
+ // bits, so store that.
+ St1(src.V4S(), 1, MemOperand(dst));
+ }
+ Bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ DoubleRegister tmp_fp = tmp_s128.fp();
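+ // Reduce across lanes with a max; NaNs propagate through Fmaxv/Fmaxp, so
+ // the scalar check in emit_set_if_nan observes them.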
+ if (lane_kind == kF32) {
+ Fmaxv(tmp_fp.S(), src.fp().V4S());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ Fmaxp(tmp_fp.D(), src.fp().V2D());
+ }
+ emit_set_if_nan(dst, tmp_fp, lane_kind);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index bb2fed83c6..5f92d50f6f 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -2718,40 +2718,6 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
-enum class ShiftSignedness { kSigned, kUnsigned };
-
-template <bool is_signed>
-void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, LiftoffRegister rhs) {
- // Same algorithm is used for both signed and unsigned shifts, the only
- // difference is the actual shift and pack in the end. This is the same
- // algorithm as used in code-generator-ia32.cc
- Register tmp =
- assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
- XMMRegister tmp_simd =
- assm->GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
-
- // Unpack the bytes into words, do logical shifts, and repack.
- assm->Punpckhbw(liftoff::kScratchDoubleReg, lhs.fp());
- assm->Punpcklbw(dst.fp(), lhs.fp());
- assm->mov(tmp, rhs.gp());
- // Take shift value modulo 8.
- assm->and_(tmp, 7);
- assm->add(tmp, Immediate(8));
- assm->Movd(tmp_simd, tmp);
- if (is_signed) {
- assm->Psraw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
- tmp_simd);
- assm->Psraw(dst.fp(), dst.fp(), tmp_simd);
- assm->Packsswb(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- assm->Psrlw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
- tmp_simd);
- assm->Psrlw(dst.fp(), dst.fp(), tmp_simd);
- assm->Packuswb(dst.fp(), liftoff::kScratchDoubleReg);
- }
-}
-
inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src) {
Register tmp =
@@ -2809,23 +2775,19 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
}
} else if (transform == LoadTransformationKind::kZeroExtend) {
if (memtype == MachineType::Int32()) {
- movss(dst.fp(), src_op);
+ Movss(dst.fp(), src_op);
} else {
DCHECK_EQ(MachineType::Int64(), memtype);
- movsd(dst.fp(), src_op);
+ Movsd(dst.fp(), src_op);
}
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
- Pinsrb(dst.fp(), src_op, 0);
- Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
+ S128Load8Splat(dst.fp(), src_op, liftoff::kScratchDoubleReg);
} else if (memtype == MachineType::Int16()) {
- Pinsrw(dst.fp(), src_op, 0);
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Punpcklqdq(dst.fp(), dst.fp());
+ S128Load16Splat(dst.fp(), src_op, liftoff::kScratchDoubleReg);
} else if (memtype == MachineType::Int32()) {
- Vbroadcastss(dst.fp(), src_op);
+ S128Load32Splat(dst.fp(), src_op);
} else if (memtype == MachineType::Int64()) {
Movddup(dst.fp(), src_op);
}
@@ -2875,12 +2837,7 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset,
S128Store32Lane(dst_op, src.fp(), lane);
} else {
DCHECK_EQ(MachineRepresentation::kWord64, rep);
- if (lane == 0) {
- Movlps(dst_op, src.fp());
- } else {
- DCHECK_EQ(1, lane);
- Movhps(dst_op, src.fp());
- }
+ S128Store64Lane(dst_op, src.fp(), lane);
}
}
@@ -2951,16 +2908,12 @@ void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16Splat(dst.fp(), src.gp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Pshufd(dst.fp(), dst.fp(), uint8_t{0});
+ I16x8Splat(dst.fp(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -3366,89 +3319,48 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(kI32);
- static constexpr RegClass tmp_simd_rc = reg_class_for(kS128);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(rhs));
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs));
LiftoffRegister tmp_simd =
- GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
- // Mask off the unwanted bits before word-shifting.
- Pcmpeqw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- mov(tmp.gp(), rhs.gp());
- and_(tmp.gp(), Immediate(7));
- add(tmp.gp(), Immediate(8));
- Movd(tmp_simd.fp(), tmp.gp());
- Psrlw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, tmp_simd.fp());
- Packuswb(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
-
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpand(dst.fp(), lhs.fp(), liftoff::kScratchDoubleReg);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- andps(dst.fp(), liftoff::kScratchDoubleReg);
- }
- sub(tmp.gp(), Immediate(8));
- Movd(tmp_simd.fp(), tmp.gp());
- Psllw(dst.fp(), dst.fp(), tmp_simd.fp());
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs));
+ I8x16Shl(dst.fp(), lhs.fp(), rhs.gp(), tmp.gp(), liftoff::kScratchDoubleReg,
+ tmp_simd.fp());
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(kI32);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc, {});
- byte shift = static_cast<byte>(rhs & 0x7);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsllw(dst.fp(), lhs.fp(), shift);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- psllw(dst.fp(), shift);
- }
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- mov(tmp.gp(), mask);
- Movd(liftoff::kScratchDoubleReg, tmp.gp());
- Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), liftoff::kScratchDoubleReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ I8x16Shl(dst.fp(), lhs.fp(), rhs, tmp.gp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/true>(this, dst, lhs, rhs);
+ Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ XMMRegister tmp_simd =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs.gp(), tmp, liftoff::kScratchDoubleReg,
+ tmp_simd);
}
void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- Punpckhbw(liftoff::kScratchDoubleReg, lhs.fp());
- Punpcklbw(dst.fp(), lhs.fp());
- uint8_t shift = (rhs & 7) + 8;
- Psraw(liftoff::kScratchDoubleReg, shift);
- Psraw(dst.fp(), shift);
- Packsswb(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs, liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/false>(this, dst, lhs, rhs);
+ Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ XMMRegister tmp_simd =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs.gp(), tmp, liftoff::kScratchDoubleReg,
+ tmp_simd);
}
void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
Register tmp = GetUnusedRegister(kGpReg, {}).gp();
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = rhs & 7;
- liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlw, &Assembler::psrlw, 3>(
- this, dst, lhs, rhs);
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- mov(tmp, mask);
- Movd(liftoff::kScratchDoubleReg, tmp);
- Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs, tmp, liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4300,26 +4212,8 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- // NAN->0
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vcmpeqps(liftoff::kScratchDoubleReg, src.fp(), src.fp());
- vpand(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
- } else {
- movaps(liftoff::kScratchDoubleReg, src.fp());
- cmpeqps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
- andps(dst.fp(), liftoff::kScratchDoubleReg);
- }
- // Set top bit if >= 0 (but not -0.0!).
- Pxor(liftoff::kScratchDoubleReg, dst.fp());
- // Convert to int.
- Cvttps2dq(dst.fp(), dst.fp());
- // Set top bit if >=0 is now < 0.
- Pand(liftoff::kScratchDoubleReg, dst.fp());
- Psrad(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF.
- Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ I32x4SConvertF32x4(dst.fp(), src.fp(), liftoff::kScratchDoubleReg, tmp);
}
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
@@ -4787,22 +4681,14 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
// Since we have more cache registers than parameter registers, the
// {LiftoffCompiler} should always be able to place {target} in a register.
DCHECK(target.is_valid());
- if (FLAG_untrusted_code_mitigations) {
- RetpolineCall(target);
- } else {
- call(target);
- }
+ call(target);
}
void LiftoffAssembler::TailCallIndirect(Register target) {
// Since we have more cache registers than parameter registers, the
// {LiftoffCompiler} should always be able to place {target} in a register.
DCHECK(target.is_valid());
- if (FLAG_untrusted_code_mitigations) {
- RetpolineJump(target);
- } else {
- jmp(target);
- }
+ jmp(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
@@ -4836,19 +4722,19 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
bind(&ret);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
if (lane_kind == kF32) {
- movaps(tmp_fp, src);
- cmpunordps(tmp_fp, tmp_fp);
+ movaps(tmp_s128.fp(), src.fp());
+ cmpunordps(tmp_s128.fp(), tmp_s128.fp());
} else {
DCHECK_EQ(lane_kind, kF64);
- movapd(tmp_fp, src);
- cmpunordpd(tmp_fp, tmp_fp);
+ movapd(tmp_s128.fp(), src.fp());
+ cmpunordpd(tmp_s128.fp(), tmp_s128.fp());
}
- pmovmskb(tmp_gp, tmp_fp);
+ pmovmskb(tmp_gp, tmp_s128.fp());
or_(Operand(dst, 0), tmp_gp);
}
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index d445655dca..5b43a2a41d 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -46,6 +46,18 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26);
+#elif V8_TARGET_ARCH_LOONG64
+
+// t6-t8 and s3-s4: scratch registers, s6: root
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3, t4, t5, s0,
+ s1, s2, s5, s7, s8);
+
+// f29: zero, f30-f31: macro-assembler scratch float registers.
+constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16,
+ f17, f18, f19, f20, f21, f22, f23, f24, f25, f26, f27, f28);
+
#elif V8_TARGET_ARCH_ARM
// r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc.
@@ -95,8 +107,8 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
// Any change of kLiftoffAssemblerGpCacheRegs also need to update
// kPushedFpRegs in frame-constants-riscv64.h
constexpr RegList kLiftoffAssemblerFpCacheRegs =
- DoubleRegister::ListOf(ft0, ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1,
- fa2, fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11);
+ DoubleRegister::ListOf(ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1, fa2,
+ fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11);
#else
constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 19611fb0ee..c94c7ece9e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -1456,12 +1456,12 @@ class LiftoffAssembler : public TurboAssembler {
// Instrumentation for shadow-stack-compatible OSR on x64.
inline void MaybeOSR();
- // Set the i32 at address dst to 1 if src is a NaN.
+ // Set the i32 at address dst to a non-zero value if src is a NaN.
inline void emit_set_if_nan(Register dst, DoubleRegister src, ValueKind kind);
// Set the i32 at address dst to a non-zero value if src contains a NaN.
- inline void emit_s128_set_if_nan(Register dst, DoubleRegister src,
- Register tmp_gp, DoubleRegister tmp_fp,
+ inline void emit_s128_set_if_nan(Register dst, LiftoffRegister src,
+ Register tmp_gp, LiftoffRegister tmp_s128,
ValueKind lane_kind);
////////////////////////////////////
@@ -1711,6 +1711,8 @@ bool CheckCompatibleStackSlotTypes(ValueKind a, ValueKind b);
#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/wasm/baseline/loong64/liftoff-assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/wasm/baseline/s390/liftoff-assembler-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index eeed531cf8..65226ab408 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -306,7 +306,7 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
// Some externally maintained architectures don't fully implement Liftoff yet.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
return;
#endif
@@ -2808,30 +2808,6 @@ class LiftoffCompiler {
__ DeallocateStackSlot(sizeof(MemoryTracingInfo));
}
- Register AddMemoryMasking(Register index, uintptr_t* offset,
- LiftoffRegList* pinned) {
- if (!FLAG_untrusted_code_mitigations ||
- env_->bounds_checks == kTrapHandler) {
- return index;
- }
- CODE_COMMENT("mask memory index");
- // Make sure that we can overwrite {index}.
- if (__ cache_state()->is_used(LiftoffRegister(index))) {
- Register old_index = index;
- pinned->clear(LiftoffRegister{old_index});
- index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
- if (index != old_index) {
- __ Move(index, old_index, kPointerKind);
- }
- }
- Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
- LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize, *pinned);
- if (*offset) __ emit_ptrsize_addi(index, index, *offset);
- __ emit_ptrsize_and(index, index, tmp);
- *offset = 0;
- return index;
- }
-
bool IndexStaticallyInBounds(const LiftoffAssembler::VarState& index_slot,
int access_size, uintptr_t* offset) {
if (!index_slot.is_const()) return false;
@@ -2892,7 +2868,6 @@ class LiftoffCompiler {
CODE_COMMENT("load from memory");
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
- index = AddMemoryMasking(index, &offset, &pinned);
// Load the memory start address only now to reduce register pressure
// (important on ia32).
@@ -2937,7 +2912,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("load with transformation");
Register addr = GetMemoryStart(pinned);
LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
@@ -2977,7 +2951,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("load lane");
Register addr = GetMemoryStart(pinned);
LiftoffRegister result = __ GetUnusedRegister(reg_class_for(kS128), {});
@@ -3023,7 +2996,6 @@ class LiftoffCompiler {
if (index == no_reg) return;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("store to memory");
uint32_t protected_store_pc = 0;
// Load the memory start address only now to reduce register pressure
@@ -3058,7 +3030,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("store lane to memory");
Register addr = pinned.set(GetMemoryStart(pinned));
uint32_t protected_store_pc = 0;
@@ -4340,7 +4311,6 @@ class LiftoffCompiler {
pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("atomic store to memory");
Register addr = pinned.set(GetMemoryStart(pinned));
LiftoffRegList outer_pinned;
@@ -4363,7 +4333,6 @@ class LiftoffCompiler {
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("atomic load from memory");
Register addr = pinned.set(GetMemoryStart(pinned));
RegClass rc = reg_class_for(kind);
@@ -4411,7 +4380,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(GetMemoryStart(pinned));
(asm_.*emit_fn)(addr, index, offset, value, result, type);
@@ -4434,7 +4402,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
__ emit_i32_add(addr, addr, index);
@@ -4467,7 +4434,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(GetMemoryStart(pinned));
LiftoffRegister result =
pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));
@@ -4514,7 +4480,6 @@ class LiftoffCompiler {
pinned);
uintptr_t offset = imm.offset;
- index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register index_plus_offset =
__ cache_state()->is_used(LiftoffRegister(index_reg))
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
@@ -4531,8 +4496,7 @@ class LiftoffCompiler {
__ cache_state()->stack_state.end()[-2];
LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-3];
- // We have to set the correct register for the index. It may have changed
- // above in {AddMemoryMasking}.
+ // We have to set the correct register for the index.
index.MakeRegister(LiftoffRegister(index_plus_offset));
static constexpr WasmCode::RuntimeStubId kTargets[2][2]{
@@ -4562,7 +4526,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
uintptr_t offset = imm.offset;
- index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register index_plus_offset =
__ cache_state()->is_used(LiftoffRegister(index_reg))
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
@@ -5055,7 +5018,7 @@ class LiftoffCompiler {
Label* trap_label =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayTooLarge);
__ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
- static_cast<int>(wasm::kV8MaxWasmArrayLength));
+ WasmArray::MaxLength(imm.array_type));
}
ValueKind elem_kind = imm.array_type->element_type().kind();
int elem_size = element_size_bytes(elem_kind);
@@ -5184,6 +5147,8 @@ class LiftoffCompiler {
void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
const Value& src, const Value& src_index,
const Value& length) {
+ // TODO(7748): Unify implementation with TF: Implement this with
+ // GenerateCCall. Remove runtime function and builtin in wasm.tq.
CallRuntimeStub(WasmCode::kWasmArrayCopyWithChecks,
MakeSig::Params(kI32, kI32, kI32, kOptRef, kOptRef),
// Builtin parameter order:
@@ -5778,28 +5743,6 @@ class LiftoffCompiler {
__ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index,
tmp_const);
- // Mask the index to prevent SSCA.
- if (FLAG_untrusted_code_mitigations) {
- CODE_COMMENT("Mask indirect call index");
- // mask = ((index - size) & ~index) >> 31
- // Reuse allocated registers; note: size is still stored in {tmp_const}.
- Register diff = table;
- Register neg_index = tmp_const;
- Register mask = scratch;
- // 1) diff = index - size
- __ emit_i32_sub(diff, index, tmp_const);
- // 2) neg_index = ~index
- __ LoadConstant(LiftoffRegister(neg_index), WasmValue(int32_t{-1}));
- __ emit_i32_xor(neg_index, neg_index, index);
- // 3) mask = diff & neg_index
- __ emit_i32_and(mask, diff, neg_index);
- // 4) mask = mask >> 31
- __ emit_i32_sari(mask, mask, 31);
-
- // Apply mask.
- __ emit_i32_and(index, index, mask);
- }
-
CODE_COMMENT("Check indirect call signature");
// Load the signature from {instance->ift_sig_ids[key]}
if (imm.table_imm.index == 0) {
@@ -6151,14 +6094,14 @@ class LiftoffCompiler {
ValueKind lane_kind) {
RegClass rc = reg_class_for(kS128);
LiftoffRegister tmp_gp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LiftoffRegister tmp_fp = pinned.set(__ GetUnusedRegister(rc, pinned));
+ LiftoffRegister tmp_s128 = pinned.set(__ GetUnusedRegister(rc, pinned));
LiftoffRegister nondeterminism_addr =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(
nondeterminism_addr,
WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(nondeterminism_)));
- __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst.fp(), tmp_gp.gp(),
- tmp_fp.fp(), lane_kind);
+ __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst, tmp_gp.gp(),
+ tmp_s128, lane_kind);
}
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
diff --git a/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
new file mode 100644
index 0000000000..f22e013601
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
@@ -0,0 +1,2817 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
+#define V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
+
+#include "src/base/platform/wrappers.h"
+#include "src/codegen/machine-type.h"
+#include "src/heap/memory-chunk.h"
+#include "src/wasm/baseline/liftoff-assembler.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace liftoff {
+
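+// Maps the platform-independent LiftoffCondition values onto the condition
+// codes used by the LoongArch64 macro assembler.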
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return eq;
+ case kUnequal:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedLessEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kSignedGreaterEqual:
+ return ge;
+ case kUnsignedLessThan:
+ return ult;
+ case kUnsignedLessEqual:
+ return ule;
+ case kUnsignedGreaterThan:
+ return ugt;
+ case kUnsignedGreaterEqual:
+ return uge;
+ }
+}
+
+// Liftoff Frames.
+//
+// slot Frame
+// +--------------------+---------------------------
+// n+4 | optional padding slot to keep the stack 16 byte aligned.
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (ra) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 | ^
+// -4 | slot 1 | |
+// | | Frame slots
+// | | |
+// | | v
+// | optional padding slot to keep the stack 16 byte aligned.
+// -----+--------------------+ <-- stack ptr (sp)
+//
+
+// fp-8 holds the stack marker, fp-16 is the instance parameter.
+constexpr int kInstanceOffset = 16;
+
+inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
+
+inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
+
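+// Computes the memory operand for {addr} + {offset} + {offset_imm}, using
+// kScratchReg to combine components that cannot be encoded in a single
+// MemOperand (kScratchReg is clobbered in that case).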
+template <typename T>
+inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
+ Register offset, T offset_imm) {
+ if (is_int32(offset_imm)) {
+ int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
+ if (offset == no_reg) return MemOperand(addr, offset_imm32);
+ assm->add_d(kScratchReg, addr, offset);
+ return MemOperand(kScratchReg, offset_imm32);
+ }
+ // Offset immediate does not fit in 31 bits.
+ assm->li(kScratchReg, Operand(offset_imm));
+ assm->add_d(kScratchReg, kScratchReg, addr);
+ if (offset != no_reg) {
+ assm->add_d(kScratchReg, kScratchReg, offset);
+ }
+ return MemOperand(kScratchReg, 0);
+}
+
+inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ assm->Ld_w(dst.gp(), src);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ assm->Ld_d(dst.gp(), src);
+ break;
+ case kF32:
+ assm->Fld_s(dst.fp(), src);
+ break;
+ case kF64:
+ assm->Fld_d(dst.fp(), src);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
+ LiftoffRegister src, ValueKind kind) {
+ MemOperand dst(base, offset);
+ switch (kind) {
+ case kI32:
+ assm->St_w(src.gp(), dst);
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ assm->St_d(src.gp(), dst);
+ break;
+ case kF32:
+ assm->Fst_s(src.fp(), dst);
+ break;
+ case kF64:
+ assm->Fst_d(src.fp(), dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
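+// Pushes {reg} onto the stack, always reserving a full pointer-sized slot so
+// that sp stays aligned regardless of the value kind.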
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->St_w(reg.gp(), MemOperand(sp, 0));
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ assm->Push(reg.gp());
+ break;
+ case kF32:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->Fst_s(reg.fp(), MemOperand(sp, 0));
+ break;
+ case kF64:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->Fst_d(reg.fp(), MemOperand(sp, 0));
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace liftoff
+
+int LiftoffAssembler::PrepareStackFrame() {
+ int offset = pc_offset();
+ // When the constant that represents the size of the stack frame can't be
+ // represented as a 16-bit immediate, we need three instructions to add it
+ // to sp, so we reserve space for this case.
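+ // The three instruction slots emitted below are patched by
+ // {PatchPrepareStackFrame} once the final frame size is known.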
+ addi_d(sp, sp, 0);
+ nop();
+ nop();
+ return offset;
+}
+
+void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
+ int stack_param_delta) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
+ // Push the return address and frame pointer to complete the stack frame.
+ Ld_d(scratch, MemOperand(fp, 8));
+ Push(scratch);
+ Ld_d(scratch, MemOperand(fp, 0));
+ Push(scratch);
+
+ // Shift the whole frame upwards.
+ int slot_count = num_callee_stack_params + 2;
+ for (int i = slot_count - 1; i >= 0; --i) {
+ Ld_d(scratch, MemOperand(sp, i * 8));
+ St_d(scratch, MemOperand(fp, (i - stack_param_delta) * 8));
+ }
+
+ // Set the new stack and frame pointer.
+ addi_d(sp, fp, -stack_param_delta * 8);
+ Pop(ra, fp);
+}
+
+void LiftoffAssembler::AlignFrameSize() {}
+
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ // The frame_size includes the frame marker and the instance slot. Both are
+ // pushed as part of frame construction, so we don't need to allocate memory
+ // for them anymore.
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+
+ // We can't run out of space; just pass anything big enough to not cause the
+ // assembler to try to grow the buffer.
+ constexpr int kAvailableSpace = 256;
+ TurboAssembler patching_assembler(
+ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
+
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ // This is the standard case for small frames: just subtract from SP and be
+ // done with it.
+ patching_assembler.Add_d(sp, sp, Operand(-frame_size));
+ return;
+ }
+
+ // The frame size is bigger than 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ Add_d(sp, sp, -frame_size)} with a jump to OOL code that
+ // does this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+
+ int imm32 = pc_offset() - offset;
+ CHECK(is_int26(imm32));
+ patching_assembler.b(imm32 >> 2);
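+ // Branch offsets are encoded in units of 4-byte instructions, hence the
+ // shift by 2.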
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ Register stack_limit = kScratchReg;
+ Ld_d(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ Ld_d(stack_limit, MemOperand(stack_limit, 0));
+ Add_d(stack_limit, stack_limit, Operand(frame_size));
+ Branch(&continuation, uge, sp, Operand(stack_limit));
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+ // decrementing the SP.
+ Add_d(sp, sp, Operand(-frame_size));
+
+ // Jump back to the start of the function, from {pc_offset()} to
+ // right after the reserved space for the {__ Add_d(sp, sp, -framesize)}
+ // (which is a Branch now).
+ int func_start_offset = offset + 3 * kInstrSize;
+ imm32 = func_start_offset - pc_offset();
+ CHECK(is_int26(imm32));
+ b(imm32 >> 2);
+}
+
+void LiftoffAssembler::FinishCode() {}
+
+void LiftoffAssembler::AbortCompilation() {}
+
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+ return liftoff::kInstanceOffset;
+}
+
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
+ default:
+ return kStackSlotSize;
+ }
+}
+
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return kind == kS128 || is_reference(kind);
+}
+
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ switch (value.type().kind()) {
+ case kI32:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kI64:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+ break;
+ case kF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
+ case kF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
+ Ld_d(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ switch (size) {
+ case 1:
+ Ld_b(dst, MemOperand(instance, offset));
+ break;
+ case 4:
+ Ld_w(dst, MemOperand(instance, offset));
+ break;
+ case 8:
+ Ld_d(dst, MemOperand(instance, offset));
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int32_t offset) {
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ Ld_d(dst, MemOperand(instance, offset));
+}
+
+void LiftoffAssembler::SpillInstance(Register instance) {
+ St_d(instance, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::ResetOSRTarget() {}
+
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ Ld_d(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ int32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ Ld_d(dst, src_op);
+}
+
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, no_reg, offset_imm);
+ Ld_d(dst, src_op);
+}
+
+void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
+ int32_t offset_imm,
+ LiftoffRegister src,
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
+ UseScratchRegisterScope temps(this);
+ Operand offset_op =
+ offset_reg.is_valid() ? Operand(offset_reg) : Operand(offset_imm);
+ // For the write barrier (below), we cannot have both an offset register and
+ // an immediate offset. Add them to a 32-bit offset initially, but in a 64-bit
+ // register, because that's needed in the MemOperand below.
+ if (offset_reg.is_valid() && offset_imm) {
+ Register effective_offset = temps.Acquire();
+ Add_d(effective_offset, offset_reg, Operand(offset_imm));
+ offset_op = Operand(effective_offset);
+ }
+ if (offset_op.is_reg()) {
+ St_d(src.gp(), MemOperand(dst_addr, offset_op.rm()));
+ } else {
+ St_d(src.gp(), MemOperand(dst_addr, offset_imm));
+ }
+
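+ // Emit the write barrier only when it can matter: skip it for explicitly
+ // skipped stores, Smi values, and pages whose flags say no remembered-set
+ // update is needed.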
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
+
+ Label write_barrier;
+ Label exit;
+ CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ &write_barrier);
+ b(&exit);
+ bind(&write_barrier);
+ JumpIfSmi(src.gp(), &exit);
+ CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &exit);
+ CallRecordWriteStubSaveRegisters(
+ dst_addr, offset_op, RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
+ StubCallMode::kCallWasmRuntimeStub);
+ bind(&exit);
+}
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc, bool is_load_mem,
+ bool i64_offset) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ Ld_bu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ case LoadType::kI64Load8S:
+ Ld_b(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ TurboAssembler::Ld_hu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ TurboAssembler::Ld_h(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load32U:
+ TurboAssembler::Ld_wu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32S:
+ TurboAssembler::Ld_w(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load:
+ TurboAssembler::Ld_d(dst.gp(), src_op);
+ break;
+ case LoadType::kF32Load:
+ TurboAssembler::Fld_s(dst.fp(), src_op);
+ break;
+ case LoadType::kF64Load:
+ TurboAssembler::Fld_d(dst.fp(), src_op);
+ break;
+ case LoadType::kS128Load:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc, bool is_store_mem) {
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
+ St_b(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
+ TurboAssembler::St_h(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32:
+ TurboAssembler::St_w(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store:
+ TurboAssembler::St_d(src.gp(), dst_op);
+ break;
+ case StoreType::kF32Store:
+ TurboAssembler::Fst_s(src.fp(), dst_op);
+ break;
+ case StoreType::kF64Store:
+ TurboAssembler::Fst_d(src.fp(), dst_op);
+ break;
+ case StoreType::kS128Store:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type, LiftoffRegList pinned) {
+ bailout(kAtomics, "AtomicLoad");
+}
+
+void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned) {
+ bailout(kAtomics, "AtomicStore");
+}
+
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicAdd");
+}
+
+void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicSub");
+}
+
+void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicAnd");
+}
+
+void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicOr");
+}
+
+void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicXor");
+}
+
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm,
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicExchange");
+}
+
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ bailout(kAtomics, "AtomicCompareExchange");
+}
+
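+// dbar 0 acts as a full memory barrier on LoongArch.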
+void LiftoffAssembler::AtomicFence() { dbar(0); }
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx,
+ ValueKind kind) {
+ MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
+ liftoff::Load(this, dst, src, kind);
+}
+
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueKind kind) {
+ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
+ liftoff::Store(this, fp, offset, src, kind);
+}
+
+void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
+ ValueKind kind) {
+ liftoff::Load(this, dst, MemOperand(sp, offset), kind);
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
+ ValueKind kind) {
+ DCHECK_NE(dst_offset, src_offset);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {});
+ Fill(reg, src_offset, kind);
+ Spill(dst_offset, reg, kind);
+}
+
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
+ DCHECK_NE(dst, src);
+ // TODO(ksreten): Handle different sizes here.
+ TurboAssembler::Move(dst, src);
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueKind kind) {
+ DCHECK_NE(dst, src);
+ if (kind != kS128) {
+ TurboAssembler::Move(dst, src);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (kind) {
+ case kI32:
+ St_w(reg.gp(), dst);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ St_d(reg.gp(), dst);
+ break;
+ case kF32:
+ Fst_s(reg.fp(), dst);
+ break;
+ case kF64:
+ TurboAssembler::Fst_d(reg.fp(), dst);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (value.type().kind()) {
+ case kI32: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
+ St_w(tmp.gp(), dst);
+ break;
+ }
+ case kI64:
+ case kRef:
+ case kOptRef: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ TurboAssembler::li(tmp.gp(), value.to_i64());
+ St_d(tmp.gp(), dst);
+ break;
+ }
+ default:
+ // kWasmF32 and kWasmF64 are unreachable, since those
+ // constants are not tracked.
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+ MemOperand src = liftoff::GetStackSlot(offset);
+ switch (kind) {
+ case kI32:
+ Ld_w(reg.gp(), src);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ // TODO(LOONG_dev): Check this for LOONG64: MIPS64 doesn't need it; do ARM64/LOONG64?
+ case kRtt:
+ case kRttWithDepth:
+ Ld_d(reg.gp(), src);
+ break;
+ case kF32:
+ Fld_s(reg.fp(), src);
+ break;
+ case kF64:
+ TurboAssembler::Fld_d(reg.fp(), src);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
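+// On a 64-bit target an i64 always fits in a single register, so there is
+// never a register-pair half to fill.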
+void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
+ UNREACHABLE();
+}
+
+void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
+ DCHECK_LT(0, size);
+ RecordUsedSpillOffset(start + size);
+
+ if (size <= 12 * kStackSlotSize) {
+ // Special straight-line code for up to 12 slots. Generates one
+ // instruction per slot (<= 12 instructions total).
+ uint32_t remainder = size;
+ for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
+ St_d(zero_reg, liftoff::GetStackSlot(start + remainder));
+ }
+ DCHECK(remainder == 4 || remainder == 0);
+ if (remainder) {
+ St_w(zero_reg, liftoff::GetStackSlot(start + remainder));
+ }
+ } else {
+ // General case for bigger counts (12 instructions).
+ // Use a0 for start address (inclusive), a1 for end address (exclusive).
+ Push(a1, a0);
+ Add_d(a0, fp, Operand(-start - size));
+ Add_d(a1, fp, Operand(-start));
+
+ Label loop;
+ bind(&loop);
+ St_d(zero_reg, MemOperand(a0, 0));
+ addi_d(a0, a0, kSystemPointerSize);
+ BranchShort(&loop, ne, a0, Operand(a1));
+
+ Pop(a1, a0);
+ }
+}
+
+void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
+ TurboAssembler::Clz_d(dst.gp(), src.gp());
+}
+
+void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
+ TurboAssembler::Ctz_d(dst.gp(), src.gp());
+}
+
+bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ TurboAssembler::Popcnt_d(dst.gp(), src.gp());
+ return true;
+}
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+ TurboAssembler::Mul_w(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero,
+ Label* trap_div_unrepresentable) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+
+ // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
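+ // Each scratch register is cleared only when its condition holds, so their
+ // sum is zero exactly when lhs == kMinInt and rhs == -1.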
+ TurboAssembler::li(kScratchReg, 1);
+ TurboAssembler::li(kScratchReg2, 1);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
+ add_d(kScratchReg, kScratchReg, kScratchReg2);
+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ Operand(zero_reg));
+
+ TurboAssembler::Div_w(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Div_wu(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Mod_w(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Mod_wu(dst, lhs, rhs);
+}
+
+#define I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ instruction(dst, lhs, rhs); \
+ }
+
+// clang-format off
+I32_BINOP(add, add_w)
+I32_BINOP(sub, sub_w)
+I32_BINOP(and, and_)
+I32_BINOP(or, or_)
+I32_BINOP(xor, xor_)
+// clang-format on
+
+#undef I32_BINOP
+
+#define I32_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst, lhs, Operand(imm)); \
+ }
+
+// clang-format off
+I32_BINOP_I(add, Add_w)
+I32_BINOP_I(sub, Sub_w)
+I32_BINOP_I(and, And)
+I32_BINOP_I(or, Or)
+I32_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I32_BINOP_I
+
+void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ TurboAssembler::Clz_w(dst, src);
+}
+
+void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ TurboAssembler::Ctz_w(dst, src);
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ TurboAssembler::Popcnt_w(dst, src);
+ return true;
+}
+
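+// Wasm defines shift counts modulo the operand width, hence the masking with
+// 0x1f here (and with 63 for the i64 shifts below).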
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
+ Register amount) { \
+ instruction(dst, src, amount); \
+ }
+#define I32_SHIFTOP_I(name, instruction, instruction1) \
+ I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
+ int amount) { \
+ instruction1(dst, src, amount & 0x1f); \
+ }
+
+I32_SHIFTOP_I(shl, sll_w, slli_w)
+I32_SHIFTOP_I(sar, sra_w, srai_w)
+I32_SHIFTOP_I(shr, srl_w, srli_w)
+
+#undef I32_SHIFTOP
+#undef I32_SHIFTOP_I
+
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int64_t imm) {
+ TurboAssembler::Add_d(dst.gp(), lhs.gp(), Operand(imm));
+}
+
+void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ TurboAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp());
+}
+
+bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero,
+ Label* trap_div_unrepresentable) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+
+ // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
+ TurboAssembler::li(kScratchReg, 1);
+ TurboAssembler::li(kScratchReg2, 1);
+ TurboAssembler::LoadZeroOnCondition(
+ kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
+ add_d(kScratchReg, kScratchReg, kScratchReg2);
+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ Operand(zero_reg));
+
+ TurboAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+#define I64_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ instruction(dst.gp(), lhs.gp(), rhs.gp()); \
+ }
+
+// clang-format off
+I64_BINOP(add, Add_d)
+I64_BINOP(sub, Sub_d)
+I64_BINOP(and, and_)
+I64_BINOP(or, or_)
+I64_BINOP(xor, xor_)
+// clang-format on
+
+#undef I64_BINOP
+
+#define I64_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i( \
+ LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
+ instruction(dst.gp(), lhs.gp(), Operand(imm)); \
+ }
+
+// clang-format off
+I64_BINOP_I(and, And)
+I64_BINOP_I(or, Or)
+I64_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I64_BINOP_I
+
+#define I64_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister src, Register amount) { \
+ instruction(dst.gp(), src.gp(), amount); \
+ }
+#define I64_SHIFTOP_I(name, instruction, instructioni) \
+ I64_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \
+ LiftoffRegister src, int amount) { \
+ instructioni(dst.gp(), src.gp(), amount & 63); \
+ }
+
+I64_SHIFTOP_I(shl, sll_d, slli_d)
+I64_SHIFTOP_I(sar, sra_d, srai_d)
+I64_SHIFTOP_I(shr, srl_d, srli_d)
+
+#undef I64_SHIFTOP
+#undef I64_SHIFTOP_I
+
+void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
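+ // bstrpick_d copies bits [31:0] of src and zero-extends them to 64 bits.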
+ bstrpick_d(dst, src, 31, 0);
+}
+
+void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
+ TurboAssembler::Neg_s(dst, src);
+}
+
+void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
+ TurboAssembler::Neg_d(dst, src);
+}
+
+void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
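+ // Float32Min handles the common case inline and jumps to |ool| for inputs it
+ // cannot handle there (e.g. NaNs); the out-of-line helper resolves those.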
+ Label ool, done;
+ TurboAssembler::Float32Min(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float32Max(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kComplexOperation, "f32_copysign");
+}
+
+void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float64Min(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float64Max(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kComplexOperation, "f64_copysign");
+}
+
+#define FP_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ instruction(dst, lhs, rhs); \
+ }
+#define FP_UNOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src); \
+ }
+#define FP_UNOP_RETURN_TRUE(name, instruction) \
+ bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src); \
+ return true; \
+ }
+
+FP_BINOP(f32_add, fadd_s)
+FP_BINOP(f32_sub, fsub_s)
+FP_BINOP(f32_mul, fmul_s)
+FP_BINOP(f32_div, fdiv_s)
+FP_UNOP(f32_abs, fabs_s)
+FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s)
+FP_UNOP_RETURN_TRUE(f32_floor, Floor_s)
+FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s)
+FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s)
+FP_UNOP(f32_sqrt, fsqrt_s)
+FP_BINOP(f64_add, fadd_d)
+FP_BINOP(f64_sub, fsub_d)
+FP_BINOP(f64_mul, fmul_d)
+FP_BINOP(f64_div, fdiv_d)
+FP_UNOP(f64_abs, fabs_d)
+FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d)
+FP_UNOP_RETURN_TRUE(f64_floor, Floor_d)
+FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d)
+FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d)
+FP_UNOP(f64_sqrt, fsqrt_d)
+
+#undef FP_BINOP
+#undef FP_UNOP
+#undef FP_UNOP_RETURN_TRUE
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src, Label* trap) {
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ TurboAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0);
+ return true;
+ case kExprI32SConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ ftintrz_w_s(kScratchDoubleReg, rounded.fp());
+ movfr2gr_s(dst.gp(), kScratchDoubleReg);
+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+ // because INT32_MIN allows easier out-of-bounds detection.
+ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+ // Check whether the conversion needs to trap.
+ movgr2fr_w(kScratchDoubleReg, dst.gp());
+ ffint_s_w(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32UConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ TurboAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+ // because 0 allows easier out-of-bounds detection.
+ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
+
+ // Check whether the conversion needs to trap.
+ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
+ fcvt_s_d(converted_back.fp(), converted_back.fp());
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32SConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ ftintrz_w_d(kScratchDoubleReg, rounded.fp());
+ movfr2gr_s(dst.gp(), kScratchDoubleReg);
+
+ // Check whether the conversion needs to trap.
+ ffint_d_w(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32UConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ TurboAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);
+
+ // Check whether the conversion needs to trap.
+ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32ReinterpretF32:
+ TurboAssembler::FmoveLow(dst.gp(), src.fp());
+ return true;
+ case kExprI64SConvertI32:
+ slli_w(dst.gp(), src.gp(), 0);
+ return true;
+ case kExprI64UConvertI32:
+ TurboAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0);
+ return true;
+ case kExprI64SConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ ftintrz_l_s(kScratchDoubleReg, rounded.fp());
+ movfr2gr_d(dst.gp(), kScratchDoubleReg);
+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
+ // because INT64_MIN allows easier out-of-bounds detection.
+ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+ // Check whether the conversion needs to trap.
+ movgr2fr_d(kScratchDoubleReg, dst.gp());
+ ffint_s_l(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI64UConvertF32: {
+ // Real conversion.
+ TurboAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg,
+ kScratchReg);
+
+ // Check whether the conversion needs to trap.
+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ return true;
+ }
+ case kExprI64SConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ ftintrz_l_d(kScratchDoubleReg, rounded.fp());
+ movfr2gr_d(dst.gp(), kScratchDoubleReg);
+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
+ // because INT64_MIN allows easier out-of-bounds detection.
+ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+ // Check whether the conversion needs to trap.
+ movgr2fr_d(kScratchDoubleReg, dst.gp());
+ ffint_d_l(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI64UConvertF64: {
+ // Real conversion.
+ TurboAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg,
+ kScratchReg);
+
+ // Check whether the conversion needs to trap.
+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ return true;
+ }
+ case kExprI64ReinterpretF64:
+ movfr2gr_d(dst.gp(), src.fp());
+ return true;
+ case kExprF32SConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ movgr2fr_w(scratch.fp(), src.gp());
+ ffint_s_w(dst.fp(), scratch.fp());
+ return true;
+ }
+ case kExprF32UConvertI32:
+ TurboAssembler::Ffint_s_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF32ConvertF64:
+ fcvt_s_d(dst.fp(), src.fp());
+ return true;
+ case kExprF32ReinterpretI32:
+ TurboAssembler::FmoveLow(dst.fp(), src.gp());
+ return true;
+ case kExprF64SConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ movgr2fr_w(scratch.fp(), src.gp());
+ ffint_d_w(dst.fp(), scratch.fp());
+ return true;
+ }
+ case kExprF64UConvertI32:
+ TurboAssembler::Ffint_d_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF64ConvertF32:
+ fcvt_d_s(dst.fp(), src.fp());
+ return true;
+ case kExprF64ReinterpretI64:
+ movgr2fr_d(dst.fp(), src.gp());
+ return true;
+ case kExprI32SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+ return true;
+ case kExprI32UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+ return true;
+ case kExprI32SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+ return true;
+ case kExprI32UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+ return true;
+ case kExprI64SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+ return true;
+ case kExprI64UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+ return true;
+ case kExprI64SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+ return true;
+ case kExprI64UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+ return true;
+ default:
+ return false;
+ }
+}
+
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ bailout(kComplexOperation, "i32_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ bailout(kComplexOperation, "i32_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i32");
+}
+
+void LiftoffAssembler::emit_jump(Label* label) {
+ TurboAssembler::Branch(label);
+}
+
+void LiftoffAssembler::emit_jump(Register target) {
+ TurboAssembler::Jump(target);
+}
+
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueKind kind,
+ Register lhs, Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ if (rhs == no_reg) {
+ DCHECK(kind == kI32 || kind == kI64);
+ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ } else {
+ DCHECK((kind == kI32 || kind == kI64) ||
+ (is_reference(kind) &&
+ (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ }
+}
+
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int32_t imm) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ TurboAssembler::Branch(label, cond, lhs, Operand(imm));
+}
+
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
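+ // Unsigned "src < 1" is only true for src == 0.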
+ sltui(dst, src, 1);
+}
+
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Register tmp = dst;
+ if (dst == lhs || dst == rhs) {
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ }
+ // Write 1 as result.
+ TurboAssembler::li(tmp, 1);
+
+ // If the negated condition holds, write 0 as the result instead.
+ Condition neg_cond = NegateCondition(cond);
+ TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
+
+ // Move the result into dst if it was computed in a temporary register.
+ TurboAssembler::Move(dst, tmp);
+}
+
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ sltui(dst, src.gp(), 1);
+}
+
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Register tmp = dst;
+ if (dst == lhs.gp() || dst == rhs.gp()) {
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ }
+ // Write 1 as result.
+ TurboAssembler::li(tmp, 1);
+
+ // If the negated condition holds, write 0 as the result instead.
+ Condition neg_cond = NegateCondition(cond);
+ TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
+ neg_cond);
+
+ // Move the result into dst if it was computed in a temporary register.
+ TurboAssembler::Move(dst, tmp);
+}
+
+namespace liftoff {
+
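+// Maps a Liftoff condition to an FPU compare condition. |predicate| tells the
+// caller whether the condition holds when the FPU flag is set (true) or when
+// it is cleared (false).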
+inline FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition,
+ bool* predicate) {
+ switch (condition) {
+ case kEqual:
+ *predicate = true;
+ return CEQ;
+ case kUnequal:
+ *predicate = false;
+ return CEQ;
+ case kUnsignedLessThan:
+ *predicate = true;
+ return CLT;
+ case kUnsignedGreaterEqual:
+ *predicate = false;
+ return CLT;
+ case kUnsignedLessEqual:
+ *predicate = true;
+ return CLE;
+ case kUnsignedGreaterThan:
+ *predicate = false;
+ return CLE;
+ default:
+ *predicate = true;
+ break;
+ }
+ UNREACHABLE();
+}
+
+} // namespace liftoff
+
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Label not_nan, cont;
+ TurboAssembler::CompareIsNanF32(lhs, rhs);
+ TurboAssembler::BranchFalseF(&not_nan);
+ // If one of the operands is NaN, return 1 for f32.ne, else 0.
+ if (cond == ne) {
+ TurboAssembler::li(dst, 1);
+ } else {
+ TurboAssembler::Move(dst, zero_reg);
+ }
+ TurboAssembler::Branch(&cont);
+
+ bind(&not_nan);
+
+ TurboAssembler::li(dst, 1);
+ bool predicate;
+ FPUCondition fcond =
+ liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
+ TurboAssembler::CompareF32(lhs, rhs, fcond);
+ if (predicate) {
+ TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+ } else {
+ TurboAssembler::LoadZeroIfFPUCondition(dst);
+ }
+
+ bind(&cont);
+}
+
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Label not_nan, cont;
+ TurboAssembler::CompareIsNanF64(lhs, rhs);
+ TurboAssembler::BranchFalseF(&not_nan);
+ // If one of the operands is NaN, return 1 for f64.ne, else 0.
+ if (cond == ne) {
+ TurboAssembler::li(dst, 1);
+ } else {
+ TurboAssembler::Move(dst, zero_reg);
+ }
+ TurboAssembler::Branch(&cont);
+
+ bind(&not_nan);
+
+ TurboAssembler::li(dst, 1);
+ bool predicate;
+ FPUCondition fcond =
+ liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
+ TurboAssembler::CompareF64(lhs, rhs, fcond);
+ if (predicate) {
+ TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+ } else {
+ TurboAssembler::LoadZeroIfFPUCondition(dst);
+ }
+
+ bind(&cont);
+}
+
+bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
+ LiftoffRegister true_value,
+ LiftoffRegister false_value,
+ ValueKind kind) {
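+ // No LOONG64-specific select; returning false lets the caller emit the
+ // generic code instead.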
+ return false;
+}
+
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
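+ // Smis have a zero tag bit, so (obj & kSmiTagMask) == 0 identifies a Smi.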
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ And(scratch, obj, Operand(kSmiTagMask));
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ Branch(target, condition, scratch, Operand(zero_reg));
+}
+
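+// SIMD (S128) support is not implemented yet on LOONG64; the emitters below
+// all bail out.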
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ bailout(kSimd, "load extend and load splat unimplemented");
+}
+
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ bailout(kSimd, "loadlane");
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ bailout(kSimd, "storelane");
+}
+
+void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16],
+ bool is_swizzle) {
+ bailout(kSimd, "emit_i8x16_shuffle");
+}
+
+void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_swizzle");
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_splat");
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_splat");
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_splat");
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_splat");
+}
+
+void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_splat");
+}
+
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_splat");
+}
+
+#define SIMD_BINOP(name1, name2) \
+ void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ bailout(kSimd, "emit_" #name1 "_extmul_low_" #name2); \
+ } \
+ void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ bailout(kSimd, "emit_" #name1 "_extmul_high_" #name2); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s)
+SIMD_BINOP(i16x8, i8x16_u)
+
+SIMD_BINOP(i32x4, i16x8_s)
+SIMD_BINOP(i32x4, i16x8_u)
+
+SIMD_BINOP(i64x2, i32x4_s)
+SIMD_BINOP(i64x2, i32x4_u)
+
+#undef SIMD_BINOP
+
+#define SIMD_BINOP(name1, name2) \
+ void LiftoffAssembler::emit_##name1##_extadd_pairwise_##name2( \
+ LiftoffRegister dst, LiftoffRegister src) { \
+ bailout(kSimd, "emit_" #name1 "_extadd_pairwise_" #name2); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s)
+SIMD_BINOP(i16x8, i8x16_u)
+SIMD_BINOP(i32x4, i16x8_s)
+SIMD_BINOP(i32x4, i16x8_u)
+#undef SIMD_BINOP
+
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "emit_i16x8_q15mulr_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_eq");
+}
+
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ne");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_s");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_u");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_s");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_u");
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_eq");
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ne");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_s");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_u");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_s");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_u");
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_eq");
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ne");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_s");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_u");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_s");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_u");
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_eq");
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_ne");
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_lt");
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_le");
+}
+
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_eq");
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_ne");
+}
+
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_abs");
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_eq");
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_ne");
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_lt");
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_le");
+}
+
+void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
+ const uint8_t imms[16]) {
+ bailout(kSimd, "emit_s128_const");
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ bailout(kSimd, "emit_s128_not");
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and");
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_or");
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_xor");
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and_not");
+}
+
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
+ bailout(kSimd, "emit_s128_select");
+}
+
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_neg");
+}
+
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v128_anytrue");
+}
+
+void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_alltrue");
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_bitmask");
+}
+
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shl");
+}
+
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shli");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_u");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_u");
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add");
+}
+
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_u");
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_u");
+}
+
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_s");
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_u");
+}
+
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_s");
+}
+
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_u");
+}
+
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_popcnt");
+}
+
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_neg");
+}
+
+void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_alltrue");
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_bitmask");
+}
+
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shl");
+}
+
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shli");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_u");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_u");
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add");
+}
+
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_s");
+}
+
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_u");
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_s");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_u");
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_mul");
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_s");
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_u");
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_s");
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_neg");
+}
+
+void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_alltrue");
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_bitmask");
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shl");
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shli");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_u");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_u");
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_add");
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_sub");
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_mul");
+}
+
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_s");
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_u");
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_s");
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_dot_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_neg");
+}
+
+void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_alltrue");
+}
+
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_bitmask");
+}
+
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shl");
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shli");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_u");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_u");
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_add");
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_sub");
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_mul");
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_gt_s");
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_ge_s");
+}
+
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_abs");
+}
+
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_neg");
+}
+
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sqrt");
+}
+
+bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_ceil");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_floor");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_trunc");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_nearest_int");
+ return true;
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_add");
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_sub");
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_mul");
+}
+
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_div");
+}
+
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_min");
+}
+
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_max");
+}
+
+void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_pmin");
+}
+
+void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_pmax");
+}
+
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_abs");
+}
+
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_neg");
+}
+
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_sqrt");
+}
+
+bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_ceil");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_floor");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_trunc");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_nearest_int");
+ return true;
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_add");
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_sub");
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_mul");
+}
+
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_div");
+}
+
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_min");
+}
+
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_max");
+}
+
+void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_pmin");
+}
+
+void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_pmax");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_convert_low_i32x4_s");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_convert_low_i32x4_u");
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_promote_low_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_s_zero");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_u_zero");
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_demote_f64x2_zero");
+}
+
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_uconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_sconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_sconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_uconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_uconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_abs");
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_abs");
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_abs");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_replace_lane");
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_replace_lane");
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_replace_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_replace_lane");
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
+ TurboAssembler::Ld_d(limit_address, MemOperand(limit_address, 0));
+ TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
+}
+
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
+ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
+}
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ if (FLAG_debug_code) Abort(reason);
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ unsigned num_gp_regs = gp_regs.GetNumRegsSet();
+ if (num_gp_regs) {
+ unsigned offset = num_gp_regs * kSystemPointerSize;
+ addi_d(sp, sp, -offset);
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetFirstRegSet();
+ offset -= kSystemPointerSize;
+ St_d(reg.gp(), MemOperand(sp, offset));
+ gp_regs.clear(reg);
+ }
+ DCHECK_EQ(offset, 0);
+ }
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned num_fp_regs = fp_regs.GetNumRegsSet();
+ if (num_fp_regs) {
+ unsigned slot_size = 8;
+ addi_d(sp, sp, -(num_fp_regs * slot_size));
+ unsigned offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::Fst_d(reg.fp(), MemOperand(sp, offset));
+ fp_regs.clear(reg);
+ offset += slot_size;
+ }
+ DCHECK_EQ(offset, num_fp_regs * slot_size);
+ }
+}
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned fp_offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset));
+ fp_regs.clear(reg);
+ fp_offset += 8;
+ }
+ if (fp_offset) addi_d(sp, sp, fp_offset);
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ unsigned gp_offset = 0;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetLastRegSet();
+ Ld_d(reg.gp(), MemOperand(sp, gp_offset));
+ gp_regs.clear(reg);
+ gp_offset += kSystemPointerSize;
+ }
+ addi_d(sp, sp, gp_offset);
+}
+
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetFirstRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
+}
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ DCHECK_LT(num_stack_slots,
+ (1 << 16) / kSystemPointerSize); // 16 bit immediate
+ Drop(static_cast<int>(num_stack_slots));
+ Ret();
+}
+
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
+ const LiftoffRegister* args,
+ const LiftoffRegister* rets,
+ ValueKind out_argument_kind, int stack_bytes,
+ ExternalReference ext_ref) {
+ addi_d(sp, sp, -stack_bytes);
+
+ int arg_bytes = 0;
+ for (ValueKind param_kind : sig->parameters()) {
+ liftoff::Store(this, sp, arg_bytes, *args++, param_kind);
+ arg_bytes += element_size_bytes(param_kind);
+ }
+ DCHECK_LE(arg_bytes, stack_bytes);
+
+ // Pass a pointer to the buffer with the arguments to the C function.
+ // On LoongArch, the first argument is passed in {a0}.
+ constexpr Register kFirstArgReg = a0;
+ mov(kFirstArgReg, sp);
+
+ // Now call the C function.
+ constexpr int kNumCCallArgs = 1;
+ PrepareCallCFunction(kNumCCallArgs, kScratchReg);
+ CallCFunction(ext_ref, kNumCCallArgs);
+
+ // Move return value to the right register.
+ const LiftoffRegister* next_result_reg = rets;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ constexpr Register kReturnReg = a0;
+ if (kReturnReg != next_result_reg->gp()) {
+ Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+ }
+ ++next_result_reg;
+ }
+
+ // Load potential output value from the buffer on the stack.
+ if (out_argument_kind != kVoid) {
+ liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_kind);
+ }
+
+ addi_d(sp, sp, stack_bytes);
+}
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ Call(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
+ Jump(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ if (target == no_reg) {
+ Pop(kScratchReg);
+ Call(kScratchReg);
+ } else {
+ Call(target);
+ }
+}
+
+void LiftoffAssembler::TailCallIndirect(Register target) {
+ if (target == no_reg) {
+ Pop(kScratchReg);
+ Jump(kScratchReg);
+ } else {
+ Jump(target);
+ }
+}
+
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ addi_d(sp, sp, -size);
+ TurboAssembler::Move(addr, sp);
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ addi_d(sp, sp, size);
+}
+
+void LiftoffAssembler::MaybeOSR() {}
+
+void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
+ ValueKind kind) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Label not_nan;
+ if (kind == kF32) {
+ CompareIsNanF32(src, src);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ CompareIsNanF64(src, src);
+ }
+ BranchFalseShortF(&not_nan);
+ li(scratch, 1);
+ St_w(scratch, MemOperand(dst, 0));
+ bind(&not_nan);
+}
+
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
+ Register tmp_gp,
+ LiftoffRegister tmp_s128,
+ ValueKind lane_kind) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffStackSlots::Construct(int param_slots) {
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
+ for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
+ const LiftoffAssembler::VarState& src = slot.src_;
+ switch (src.loc()) {
+ case LiftoffAssembler::VarState::kStack:
+ if (src.kind() != kS128) {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->Push(kScratchReg);
+ } else {
+ asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_ - 8));
+ asm_->Push(kScratchReg);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->Push(kScratchReg);
+ }
+ break;
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
+ liftoff::push(asm_, src.reg(), src.kind());
+ break;
+ }
+ case LiftoffAssembler::VarState::kIntConst: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->li(kScratchReg, Operand(src.i32_const()));
+ asm_->Push(kScratchReg);
+ break;
+ }
+ }
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
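
The LoongArch64 CallC above spills every argument into a stack buffer, passes the buffer's address as the single C argument in a0, and reads a potential out-argument back from offset 0 of the same buffer. A minimal standalone sketch of that buffer-based convention, in plain C++ and not part of this diff; the names arg_buffer and callee are illustrative only:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical callee: receives one pointer to a packed argument buffer,
// reads its inputs from it and writes the result back into the same buffer.
void callee(uint8_t* arg_buffer) {
  int32_t a, b;
  std::memcpy(&a, arg_buffer + 0, sizeof(a));
  std::memcpy(&b, arg_buffer + 4, sizeof(b));
  int64_t sum = int64_t{a} + b;
  std::memcpy(arg_buffer, &sum, sizeof(sum));  // out-argument at offset 0
}

int main() {
  uint8_t arg_buffer[16] = {};                 // the stack buffer CallC reserves
  int32_t a = 40, b = 2;
  std::memcpy(arg_buffer + 0, &a, sizeof(a));  // analog of liftoff::Store per parameter
  std::memcpy(arg_buffer + 4, &b, sizeof(b));
  callee(arg_buffer);                          // buffer address passed as first argument
  int64_t result;
  std::memcpy(&result, arg_buffer, sizeof(result));
  std::printf("%lld\n", static_cast<long long>(result));  // 42
}
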
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 4ab036da8e..35eabecbf0 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -3067,20 +3067,22 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
ValueKind kind) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, 1);
+ Label not_nan;
if (kind == kF32) {
CompareIsNanF32(src, src);
} else {
DCHECK_EQ(kind, kF64);
CompareIsNanF64(src, src);
}
- LoadZeroIfNotFPUCondition(scratch);
- Sw(scratch, MemOperand(dst));
+ BranchFalseShortF(&not_nan, USE_DELAY_SLOT);
+ li(scratch, 1);
+ sw(scratch, MemOperand(dst));
+ bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
UNIMPLEMENTED();
}
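
The emit_set_if_nan variants above compare src with itself (CompareIsNanF32/CompareIsNanF64) and store a non-zero flag only when that comparison is unordered, relying on the IEEE-754 rule that a NaN is not equal to itself. A minimal scalar sketch of the same check, plain C++ rather than the macro assembler:

#include <cassert>
#include <cmath>
#include <cstdint>

// Writes 1 to *dst if src is NaN, leaves *dst untouched otherwise --
// the same contract emit_set_if_nan implements with a branch over the store.
void set_if_nan(int32_t* dst, double src) {
  if (src != src) {  // unordered with itself <=> NaN
    *dst = 1;
  }
}

int main() {
  int32_t flag = 0;
  set_if_nan(&flag, 1.0);
  assert(flag == 0);
  set_if_nan(&flag, std::nan(""));
  assert(flag == 1);
}
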
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 0a23c190e9..e47da84148 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -3235,22 +3235,35 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
ValueKind kind) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, 1);
+ Label not_nan;
if (kind == kF32) {
CompareIsNanF32(src, src);
} else {
DCHECK_EQ(kind, kF64);
CompareIsNanF64(src, src);
}
- LoadZeroIfNotFPUCondition(scratch);
- Sd(scratch, MemOperand(dst));
+ BranchFalseShortF(&not_nan, USE_DELAY_SLOT);
+ li(scratch, 1);
+ Sw(scratch, MemOperand(dst));
+ bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ Label not_nan;
+ if (lane_kind == kF32) {
+ fcun_w(tmp_s128.fp().toW(), src.fp().toW(), src.fp().toW());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ fcun_d(tmp_s128.fp().toW(), src.fp().toW(), src.fp().toW());
+ }
+ BranchMSA(&not_nan, MSA_BRANCH_V, all_zero, tmp_s128.fp().toW(),
+ USE_DELAY_SLOT);
+ li(tmp_gp, 1);
+ Sw(tmp_gp, MemOperand(dst));
+ bind(&not_nan);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 8e3808d259..617e193bd1 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -41,7 +41,7 @@ namespace liftoff {
//
//
-constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int32_t kInstanceOffset = 3 * kSystemPointerSize;
inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset =
@@ -106,7 +106,26 @@ int LiftoffAssembler::PrepareStackFrame() {
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
- bailout(kUnsupportedArchitecture, "PrepareTailCall");
+ Register scratch = ip;
+ // Push the return address and frame pointer to complete the stack frame.
+ AddS64(sp, sp, Operand(-2 * kSystemPointerSize), r0);
+ LoadU64(scratch, MemOperand(fp, kSystemPointerSize), r0);
+ StoreU64(scratch, MemOperand(sp, kSystemPointerSize), r0);
+ LoadU64(scratch, MemOperand(fp), r0);
+ StoreU64(scratch, MemOperand(sp), r0);
+
+ // Shift the whole frame upwards.
+ int slot_count = num_callee_stack_params + 2;
+ for (int i = slot_count - 1; i >= 0; --i) {
+ LoadU64(scratch, MemOperand(sp, i * kSystemPointerSize), r0);
+ StoreU64(scratch,
+ MemOperand(fp, (i - stack_param_delta) * kSystemPointerSize), r0);
+ }
+
+ // Set the new stack and frame pointer.
+ AddS64(sp, fp, Operand(-stack_param_delta * kSystemPointerSize), r0);
+ Pop(r0, fp);
+ mtlr(r0);
}
void LiftoffAssembler::AlignFrameSize() {}
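
The PPC PrepareTailCall above first completes the current frame (saved return address and frame pointer) and then copies the live stack slots upwards so the tail-callee reuses the caller's frame; the descending loop keeps the overlapping copy intact when the destination lies above the source. A schematic model of just that slot copy, with the stack as a plain array; indices are illustrative, not the real frame layout:

#include <cassert>
#include <cstdint>
#include <vector>

// Schematic only: move slot_count stack slots from src_base to dst_base,
// copying from the last slot down as the descending loop in PrepareTailCall does.
void shift_frame(std::vector<uint64_t>& slots, int src_base, int dst_base,
                 int slot_count) {
  for (int i = slot_count - 1; i >= 0; --i) {
    slots[dst_base + i] = slots[src_base + i];
  }
}

int main() {
  std::vector<uint64_t> slots = {10, 11, 12, 13, 14, 15};
  shift_frame(slots, /*src_base=*/0, /*dst_base=*/2, /*slot_count=*/4);
  assert(slots[2] == 10 && slots[5] == 13);
}
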
@@ -169,14 +188,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
case kF32: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- mov(scratch, Operand(value.to_f32_boxed().get_scalar()));
- MovIntToFloat(reg.fp(), scratch);
+ mov(scratch, Operand(value.to_f32_boxed().get_bits()));
+ MovIntToFloat(reg.fp(), scratch, ip);
break;
}
case kF64: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- mov(scratch, Operand(value.to_f64_boxed().get_scalar()));
+ mov(scratch, Operand(value.to_f64_boxed().get_bits()));
MovInt64ToDouble(reg.fp(), scratch);
break;
}
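
The LoadConstant fix above replaces get_scalar() with get_bits(): the f32/f64 constant is materialized by moving its raw bit pattern through a GP register into the FP register, not by converting its numeric value. A tiny sketch of that bit-pattern transfer in plain C++; bits_to_float is only a reinterpretation stand-in, not the PPC MovIntToFloat instruction:

#include <cassert>
#include <cstdint>
#include <cstring>

// Reinterpret a 32-bit pattern as an IEEE-754 float, the way MovIntToFloat
// moves the GP register's bits into an FP register unchanged.
float bits_to_float(uint32_t bits) {
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  const float value = 1.5f;
  uint32_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // analog of to_f32_boxed().get_bits()
  assert(bits == 0x3FC00000u);
  assert(bits_to_float(bits) == 1.5f);
}
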
@@ -750,12 +769,19 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
#define SIGN_EXT(r) extsw(r, r)
#define ROUND_F64_TO_F32(fpr) frsp(fpr, fpr)
#define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
+#define INT32_AND_WITH_3F(x) Operand(x & 0x3f)
#define REGISTER_AND_WITH_1F \
([&](Register rhs) { \
andi(r0, rhs, Operand(31)); \
return r0; \
})
+#define REGISTER_AND_WITH_3F \
+ ([&](Register rhs) { \
+ andi(r0, rhs, Operand(63)); \
+ return r0; \
+ })
+
#define LFR_TO_REG(reg) reg.gp()
// V(name, instr, dtype, stype, dcast, scast, rcast, return_val, return_type)
@@ -772,16 +798,12 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
true, bool) \
V(f32_trunc, friz, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, \
true, bool) \
- V(f32_nearest_int, frin, DoubleRegister, DoubleRegister, , , \
- ROUND_F64_TO_F32, true, bool) \
V(f64_abs, fabs, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_neg, fneg, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_sqrt, fsqrt, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_floor, frim, DoubleRegister, DoubleRegister, , , USE, true, bool) \
V(f64_ceil, frip, DoubleRegister, DoubleRegister, , , USE, true, bool) \
V(f64_trunc, friz, DoubleRegister, DoubleRegister, , , USE, true, bool) \
- V(f64_nearest_int, frin, DoubleRegister, DoubleRegister, , , USE, true, \
- bool) \
V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE, , void) \
V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE, , void) \
V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister, \
@@ -873,17 +895,17 @@ UNOP_LIST(EMIT_UNOP_FUNCTION)
V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
REGISTER_AND_WITH_1F, USE, , void) \
V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
USE, , void) \
V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
@@ -921,53 +943,139 @@ BINOP_LIST(EMIT_BINOP_FUNCTION)
#undef REGISTER_AND_WITH_1F
#undef LFR_TO_REG
+bool LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ return false;
+}
+
+bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ return false;
+}
+
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- bailout(kUnsupportedArchitecture, "i32_divs");
+ Label cont;
+
+ // Check for division by zero.
+ CmpS32(rhs, Operand::Zero(), r0);
+ b(eq, trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS32(rhs, Operand(-1), r0);
+ bne(&cont);
+ CmpS32(lhs, Operand(kMinInt), r0);
+ b(eq, trap_div_unrepresentable);
+
+ bind(&cont);
+ DivS32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_divu");
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ DivU32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_rems");
+ Label cont, done, trap_div_unrepresentable;
+ // Check for division by zero.
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check kMinInt/-1 case.
+ CmpS32(rhs, Operand(-1), r0);
+ bne(&cont);
+ CmpS32(lhs, Operand(kMinInt), r0);
+ beq(&trap_div_unrepresentable);
+
+ // Continue normal calculation.
+ bind(&cont);
+ ModS32(dst, lhs, rhs);
+ bne(&done);
+
+ // Trap for the kMinInt/-1 case.
+ bind(&trap_div_unrepresentable);
+ mov(dst, Operand(0));
+ bind(&done);
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_remu");
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ ModU32(dst, lhs, rhs);
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- bailout(kUnsupportedArchitecture, "i64_divs");
+ constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
+ Label cont;
+ // Check for division by zero.
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS64(rhs.gp(), Operand(-1), r0);
+ bne(&cont);
+ CmpS64(lhs.gp(), Operand(kMinInt64), r0);
+ beq(trap_div_unrepresentable);
+
+ bind(&cont);
+ DivS64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_divu");
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ // Do div.
+ DivU64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_rems");
+ constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
+
+ Label trap_div_unrepresentable;
+ Label done;
+ Label cont;
+
+ // Check for division by zero.
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS64(rhs.gp(), Operand(-1), r0);
+ bne(&cont);
+ CmpS64(lhs.gp(), Operand(kMinInt64), r0);
+ beq(&trap_div_unrepresentable);
+
+ bind(&cont);
+ ModS64(dst.gp(), lhs.gp(), rhs.gp());
+ bne(&done);
+
+ bind(&trap_div_unrepresentable);
+ mov(dst.gp(), Operand(0));
+ bind(&done);
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_remu");
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ ModU64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
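
The division and remainder helpers above all enforce the same WebAssembly rules: divide-by-zero traps, INT_MIN / -1 traps for signed division (the quotient is unrepresentable), and INT_MIN rem -1 yields 0 instead of trapping. A standalone sketch of those checks for the 32-bit case, plain C++ with a Trap exception standing in for the generated trap branches:

#include <cassert>
#include <cstdint>
#include <limits>
#include <stdexcept>

struct Trap : std::runtime_error {
  using std::runtime_error::runtime_error;
};

int32_t i32_div_s(int32_t lhs, int32_t rhs) {
  if (rhs == 0) throw Trap("divide by zero");
  if (lhs == std::numeric_limits<int32_t>::min() && rhs == -1)
    throw Trap("integer overflow");  // quotient 2^31 is unrepresentable
  return lhs / rhs;                  // truncates toward zero, as wasm requires
}

int32_t i32_rem_s(int32_t lhs, int32_t rhs) {
  if (rhs == 0) throw Trap("divide by zero");
  if (lhs == std::numeric_limits<int32_t>::min() && rhs == -1)
    return 0;  // defined as 0, mirroring the trap_div_unrepresentable label above
  return lhs % rhs;
}

int main() {
  assert(i32_div_s(7, -2) == -3);
  assert(i32_rem_s(std::numeric_limits<int32_t>::min(), -1) == 0);
}
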
@@ -1083,10 +1191,10 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
- fcmpu(lhs, rhs);
+ fcmpu(lhs, rhs, cr7);
Label done;
mov(dst, Operand(1));
- b(liftoff::ToCondition(liftoff_cond), &done);
+ b(liftoff::ToCondition(liftoff_cond), &done, cr7);
mov(dst, Operand::Zero());
bind(&done);
}
@@ -1114,7 +1222,9 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
SmiCheckMode mode) {
- bailout(kUnsupportedArchitecture, "emit_smi_check");
+ TestIfSmi(obj, r0);
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ b(condition, target, cr0); // branch if SMI
}
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
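
emit_smi_check above tests the tag bit of a value with TestIfSmi and picks the branch condition from the mode: V8 encodes small integers (Smis) with a clear low bit and heap object pointers with the bit set. A minimal model of that tag test; the scheme shown is the usual V8 convention but simplified (the real Smi payload shift differs between builds):

#include <cassert>
#include <cstdint>

// Simplified V8-style tagging: Smis carry a 0 in the lowest bit,
// heap object pointers carry a 1.
bool is_smi(uintptr_t tagged) { return (tagged & 1) == 0; }

int main() {
  uintptr_t smi = uintptr_t{42} << 1;  // payload shifted left, tag bit 0
  uintptr_t heap_object = 0x1000 | 1;  // pointer with tag bit 1
  assert(is_smi(smi));
  assert(!is_smi(heap_object));
}
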
@@ -2266,18 +2376,31 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- bailout(kUnsupportedArchitecture, "PushRegisters");
+ MultiPush(regs.GetGpList());
+ MultiPushDoubles(regs.GetFpList());
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- bailout(kUnsupportedArchitecture, "PopRegisters");
+ MultiPopDoubles(regs.GetFpList());
+ MultiPop(regs.GetGpList());
}
void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
LiftoffRegList all_spills,
LiftoffRegList ref_spills,
int spill_offset) {
- bailout(kRefTypes, "RecordSpillsInSafepoint");
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetLastRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
@@ -2289,15 +2412,95 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* rets,
ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
- bailout(kUnsupportedArchitecture, "CallC");
+ int total_size = RoundUp(stack_bytes, kSystemPointerSize);
+
+ int size = total_size;
+ constexpr int kStackPageSize = 4 * KB;
+
+ // Reserve space in the stack.
+ while (size > kStackPageSize) {
+ SubS64(sp, sp, Operand(kStackPageSize), r0);
+ StoreU64(r0, MemOperand(sp));
+ size -= kStackPageSize;
+ }
+
+ SubS64(sp, sp, Operand(size), r0);
+
+ int arg_bytes = 0;
+ for (ValueKind param_kind : sig->parameters()) {
+ switch (param_kind) {
+ case kI32:
+ StoreU32(args->gp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kI64:
+ StoreU64(args->gp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kF32:
+ StoreF32(args->fp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kF64:
+ StoreF64(args->fp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ args++;
+ arg_bytes += element_size_bytes(param_kind);
+ }
+
+ DCHECK_LE(arg_bytes, stack_bytes);
+
+ // Pass a pointer to the buffer with the arguments to the C function.
+ mr(r3, sp);
+
+ // Now call the C function.
+ constexpr int kNumCCallArgs = 1;
+ PrepareCallCFunction(kNumCCallArgs, r0);
+ CallCFunction(ext_ref, kNumCCallArgs);
+
+ // Move return value to the right register.
+ const LiftoffRegister* result_reg = rets;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ constexpr Register kReturnReg = r3;
+ if (kReturnReg != rets->gp()) {
+ Move(*rets, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+ }
+ result_reg++;
+ }
+
+ // Load potential output value from the buffer on the stack.
+ if (out_argument_kind != kVoid) {
+ switch (out_argument_kind) {
+ case kI32:
+ LoadS32(result_reg->gp(), MemOperand(sp));
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ LoadU64(result_reg->gp(), MemOperand(sp));
+ break;
+ case kF32:
+ LoadF32(result_reg->fp(), MemOperand(sp));
+ break;
+ case kF64:
+ LoadF64(result_reg->fp(), MemOperand(sp));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ AddS64(sp, sp, Operand(total_size), r0);
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- bailout(kUnsupportedArchitecture, "CallNativeWasmCode");
+ Call(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
- bailout(kUnsupportedArchitecture, "TailCallNativeWasmCode");
+ Jump(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
@@ -2315,11 +2518,12 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- bailout(kUnsupportedArchitecture, "AllocateStackSlot");
+ SubS64(sp, sp, Operand(size), r0);
+ mr(addr, sp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
+ AddS64(sp, sp, Operand(size));
}
void LiftoffAssembler::MaybeOSR() {}
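
The PPC CallC above rounds the argument-buffer size up to whole pointer-sized slots and, for large buffers, lowers sp one page at a time with a store on each page so stack guard pages are touched in order. A small sketch of just the rounding and chunking arithmetic, plain C++ with illustrative constants:

#include <cassert>
#include <cstdint>

constexpr int kSystemPointerSize = 8;
constexpr int kStackPageSize = 4 * 1024;

// RoundUp analog: smallest multiple of alignment that is >= value.
constexpr int RoundUp(int value, int alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

int main() {
  int stack_bytes = 18;
  int total_size = RoundUp(stack_bytes, kSystemPointerSize);
  assert(total_size == 24);

  // Split a large reservation into page-sized steps, as the while loop does,
  // so each step can touch its page before moving on.
  int size = 3 * kStackPageSize + 100;
  int steps = 0;
  while (size > kStackPageSize) {
    size -= kStackPageSize;  // SubS64(sp, sp, kStackPageSize) plus touching store
    ++steps;
  }
  assert(steps == 3 && size == 100);  // the final SubS64 handles the remainder
}
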
@@ -2329,15 +2533,114 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
UNIMPLEMENTED();
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
UNIMPLEMENTED();
}
void LiftoffStackSlots::Construct(int param_slots) {
- asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
+ for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
+ const LiftoffAssembler::VarState& src = slot.src_;
+ switch (src.loc()) {
+ case LiftoffAssembler::VarState::kStack: {
+ switch (src.kind()) {
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ case kI64: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+ asm_->LoadU64(scratch, liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->Push(scratch);
+ break;
+ }
+ case kF32: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->LoadF32(kScratchDoubleReg,
+ liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize));
+ asm_->StoreF32(kScratchDoubleReg, MemOperand(sp), r0);
+ break;
+ }
+ case kF64: {
+ asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
+ asm_->LoadF64(kScratchDoubleReg,
+ liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF64(kScratchDoubleReg, MemOperand(sp), r0);
+ break;
+ }
+ case kS128: {
+ asm_->bailout(kSimd, "LiftoffStackSlots::Construct");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
+ switch (src.kind()) {
+ case kI64:
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ asm_->push(src.reg().gp());
+ break;
+ case kF32:
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF32(src.reg().fp(), MemOperand(sp), r0);
+ break;
+ case kF64:
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF64(src.reg().fp(), MemOperand(sp), r0);
+ break;
+ case kS128: {
+ asm_->bailout(kSimd, "LiftoffStackSlots::Construct");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case LiftoffAssembler::VarState::kIntConst: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ DCHECK(src.kind() == kI32 || src.kind() == kI64);
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+
+ switch (src.kind()) {
+ case kI32:
+ asm_->mov(scratch, Operand(src.i32_const()));
+ break;
+ case kI64:
+ asm_->mov(scratch, Operand(int64_t{slot.src_.i32_const()}));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ asm_->push(scratch);
+ break;
+ }
+ }
+ }
}
} // namespace wasm
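
LiftoffStackSlots::Construct above walks the parameter slots in push order and, before each push, allocates only the gap between the previous slot and this one (stack_decrement minus the bytes the push itself writes), so holes between parameter slots become padding. A compact model of that bookkeeping; slot indices and pushed_bytes are illustrative:

#include <cassert>
#include <vector>

constexpr int kSystemPointerSize = 8;

// For each destination slot (in push order, i.e. decreasing slot index),
// compute how much stack must be reserved before pushing pushed_bytes.
std::vector<int> padding_per_slot(const std::vector<int>& dst_slots,
                                  int param_slots, int pushed_bytes) {
  std::vector<int> padding;
  int last_stack_slot = param_slots;
  for (int stack_slot : dst_slots) {
    int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
    padding.push_back(stack_decrement - pushed_bytes);
    last_stack_slot = stack_slot;
  }
  return padding;
}

int main() {
  // Four parameter slots, but only slots 3 and 1 carry values; the skipped
  // slot shows up as extra padding before the second push.
  std::vector<int> padding =
      padding_per_slot({3, 1}, /*param_slots=*/4, /*pushed_bytes=*/8);
  assert(padding[0] == 0);  // slot 3 is adjacent to the top: (4-3)*8 - 8
  assert(padding[1] == 8);  // slot 2 is skipped: (3-1)*8 - 8
}
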
diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index fef59471c1..1860a1920f 100644
--- a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -79,16 +79,16 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
if (is_uint31(offset_imm)) {
int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
if (offset == no_reg) return MemOperand(addr, offset_imm32);
- assm->Add64(kScratchReg, addr, offset);
- return MemOperand(kScratchReg, offset_imm32);
+ assm->Add64(kScratchReg2, addr, offset);
+ return MemOperand(kScratchReg2, offset_imm32);
}
// Offset immediate does not fit in 31 bits.
- assm->li(kScratchReg, offset_imm);
- assm->Add64(kScratchReg, kScratchReg, addr);
+ assm->li(kScratchReg2, offset_imm);
+ assm->Add64(kScratchReg2, kScratchReg2, addr);
if (offset != no_reg) {
- assm->Add64(kScratchReg, kScratchReg, offset);
+ assm->Add64(kScratchReg2, kScratchReg2, offset);
}
- return MemOperand(kScratchReg, 0);
+ return MemOperand(kScratchReg2, 0);
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
@@ -128,10 +128,10 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
assm->Usd(src.gp(), dst);
break;
case kF32:
- assm->UStoreFloat(src.fp(), dst);
+ assm->UStoreFloat(src.fp(), dst, kScratchReg);
break;
case kF64:
- assm->UStoreDouble(src.fp(), dst);
+ assm->UStoreDouble(src.fp(), dst, kScratchReg);
break;
default:
UNREACHABLE();
@@ -335,7 +335,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
// space if we first allocate the frame and then do the stack check (we will
// need some remaining stack space for throwing the exception). That's why we
// check the available stack space before we allocate the frame. To do this we
- // replace the {__ Daddu(sp, sp, -frame_size)} with a jump to OOL code that
+ // replace the {__ Add64(sp, sp, -frame_size)} with a jump to OOL code that
// does this "extended stack check".
//
// The OOL code can simply be generated here with the normal assembler,
@@ -376,7 +376,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
Add64(sp, sp, Operand(-frame_size));
// Jump back to the start of the function, from {pc_offset()} to
- // right after the reserved space for the {__ Daddu(sp, sp, -framesize)}
+ // right after the reserved space for the {__ Add64(sp, sp, -framesize)}
// (which is a Branch now).
int func_start_offset = offset + 2 * kInstrSize;
imm32 = func_start_offset - pc_offset();
@@ -552,11 +552,20 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
TurboAssembler::Uld(dst.gp(), src_op);
break;
case LoadType::kF32Load:
- TurboAssembler::ULoadFloat(dst.fp(), src_op);
+ TurboAssembler::ULoadFloat(dst.fp(), src_op, kScratchReg);
break;
case LoadType::kF64Load:
- TurboAssembler::ULoadDouble(dst.fp(), src_op);
+ TurboAssembler::ULoadDouble(dst.fp(), src_op, kScratchReg);
break;
+ case LoadType::kS128Load: {
+ VU.set(kScratchReg, E8, m1);
+ Register src_reg = src_op.offset() == 0 ? src_op.rm() : kScratchReg;
+ if (src_op.offset() != 0) {
+ TurboAssembler::Add64(src_reg, src_op.rm(), src_op.offset());
+ }
+ vl(dst.fp().toV(), src_reg, 0, E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -607,11 +616,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
TurboAssembler::Usd(src.gp(), dst_op);
break;
case StoreType::kF32Store:
- TurboAssembler::UStoreFloat(src.fp(), dst_op);
+ TurboAssembler::UStoreFloat(src.fp(), dst_op, kScratchReg);
break;
case StoreType::kF64Store:
- TurboAssembler::UStoreDouble(src.fp(), dst_op);
+ TurboAssembler::UStoreDouble(src.fp(), dst_op, kScratchReg);
break;
+ case StoreType::kS128Store: {
+ VU.set(kScratchReg, E8, m1);
+ Register dst_reg = dst_op.offset() == 0 ? dst_op.rm() : kScratchReg;
+ if (dst_op.offset() != 0) {
+ Add64(kScratchReg, dst_op.rm(), dst_op.offset());
+ }
+ vs(src.fp().toV(), dst_reg, 0, VSew::E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -747,24 +765,26 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
switch (type.value()) {
case LoadType::kI32Load8U:
case LoadType::kI64Load8U:
+ fence(PSR | PSW, PSR | PSW);
lbu(dst.gp(), src_reg, 0);
- sync();
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI32Load16U:
case LoadType::kI64Load16U:
+ fence(PSR | PSW, PSR | PSW);
lhu(dst.gp(), src_reg, 0);
- sync();
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI32Load:
- lr_w(true, true, dst.gp(), src_reg);
- return;
case LoadType::kI64Load32U:
- lr_w(true, true, dst.gp(), src_reg);
- slli(dst.gp(), dst.gp(), 32);
- srli(dst.gp(), dst.gp(), 32);
+ fence(PSR | PSW, PSR | PSW);
+ lw(dst.gp(), src_reg, 0);
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI64Load:
- lr_d(true, true, dst.gp(), src_reg);
+ fence(PSR | PSW, PSR | PSW);
+ ld(dst.gp(), src_reg, 0);
+ fence(PSR, PSR | PSW);
return;
default:
UNREACHABLE();
@@ -780,22 +800,22 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
switch (type.value()) {
case StoreType::kI64Store8:
case StoreType::kI32Store8:
- sync();
+ fence(PSR | PSW, PSW);
sb(src.gp(), dst_reg, 0);
- sync();
return;
case StoreType::kI64Store16:
case StoreType::kI32Store16:
- sync();
+ fence(PSR | PSW, PSW);
sh(src.gp(), dst_reg, 0);
- sync();
return;
case StoreType::kI64Store32:
case StoreType::kI32Store:
- sc_w(true, true, zero_reg, dst_reg, src.gp());
+ fence(PSR | PSW, PSW);
+ sw(src.gp(), dst_reg, 0);
return;
case StoreType::kI64Store:
- sc_d(true, true, zero_reg, dst_reg, src.gp());
+ fence(PSR | PSW, PSW);
+ sd(src.gp(), dst_reg, 0);
return;
default:
UNREACHABLE();
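
The RISC-V hunks above replace the lr/sc-based atomic loads and stores with plain loads and stores bracketed by fences, which matches a standard RISC-V lowering for sequentially consistent accesses (fence rw,rw before and fence r,rw after a load; fence rw,w before a store); WebAssembly atomic loads and stores require sequential consistency. The same guarantee expressed at the C++ level, without assuming anything about the generated instructions:

#include <atomic>
#include <cassert>
#include <cstdint>

std::atomic<int32_t> cell{0};

// Sequentially consistent store/load pair; on RISC-V a compiler may lower
// these to fence-bracketed plain accesses like the ones emitted above.
void store_value(int32_t v) { cell.store(v, std::memory_order_seq_cst); }

int32_t load_value() { return cell.load(std::memory_order_seq_cst); }

int main() {
  store_value(7);
  assert(load_value() == 7);
}
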
@@ -948,7 +968,11 @@ void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueKind kind) {
DCHECK_NE(dst, src);
- TurboAssembler::Move(dst, src);
+ if (kind != kS128) {
+ TurboAssembler::Move(dst, src);
+ } else {
+ TurboAssembler::vmv_vv(dst.toV(), src.toV());
+ }
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
@@ -971,9 +995,15 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
case kF64:
TurboAssembler::StoreDouble(reg.fp(), dst);
break;
- case kS128:
- bailout(kSimd, "Spill S128");
+ case kS128: {
+ VU.set(kScratchReg, E8, m1);
+ Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
+ if (dst.offset() != 0) {
+ Add64(kScratchReg, dst.rm(), dst.offset());
+ }
+ vs(reg.fp().toV(), dst_reg, 0, VSew::E8);
break;
+ }
default:
UNREACHABLE();
}
@@ -1021,6 +1051,15 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
case kF64:
TurboAssembler::LoadDouble(reg.fp(), src);
break;
+ case kS128: {
+ VU.set(kScratchReg, E8, m1);
+ Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
+ if (src.offset() != 0) {
+ TurboAssembler::Add64(src_reg, src.rm(), src.offset());
+ }
+ vl(reg.fp().toV(), src_reg, 0, E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -1072,7 +1111,7 @@ void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- TurboAssembler::Popcnt64(dst.gp(), src.gp());
+ TurboAssembler::Popcnt64(dst.gp(), src.gp(), kScratchReg);
return true;
}
@@ -1154,7 +1193,7 @@ void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- TurboAssembler::Popcnt32(dst, src);
+ TurboAssembler::Popcnt32(dst, src, kScratchReg);
return true;
}
@@ -1663,7 +1702,33 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister rhs,
const uint8_t shuffle[16],
bool is_swizzle) {
- bailout(kSimd, "emit_i8x16_shuffle");
+ VRegister dst_v = dst.fp().toV();
+ VRegister lhs_v = lhs.fp().toV();
+ VRegister rhs_v = rhs.fp().toV();
+
+ uint64_t imm1 = *(reinterpret_cast<const uint64_t*>(shuffle));
+ uint64_t imm2 = *((reinterpret_cast<const uint64_t*>(shuffle)) + 1);
+ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ li(kScratchReg, 1);
+ vmv_vx(v0, kScratchReg);
+ li(kScratchReg, imm1);
+ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+ li(kScratchReg, imm2);
+ vsll_vi(v0, v0, 1);
+ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+
+ VU.set(kScratchReg, E8, m1);
+ if (dst_v == lhs_v) {
+ vmv_vv(kSimd128ScratchReg2, lhs_v);
+ lhs_v = kSimd128ScratchReg2;
+ } else if (dst_v == rhs_v) {
+ vmv_vv(kSimd128ScratchReg2, rhs_v);
+ rhs_v = kSimd128ScratchReg2;
+ }
+ vrgather_vv(dst_v, lhs_v, kSimd128ScratchReg);
+ vadd_vi(kSimd128ScratchReg, kSimd128ScratchReg, -16);
+ vrgather_vv(kSimd128ScratchReg, rhs_v, kSimd128ScratchReg);
+ vor_vv(dst_v, dst_v, kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
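
The RVV shuffle above materializes the 16 lane indices as a constant vector and performs two gathers, one over lhs and one over rhs with the indices reduced by 16, OR-ing the results. The lane semantics being implemented are those of i8x16.shuffle: index 0-15 selects from the first operand, 16-31 from the second. A scalar reference version in plain C++, arrays standing in for vector registers:

#include <array>
#include <cassert>
#include <cstdint>

using Bytes16 = std::array<uint8_t, 16>;

// Reference semantics of i8x16.shuffle: each output lane copies one input
// lane, picked from lhs for indices < 16 and from rhs for indices >= 16.
Bytes16 i8x16_shuffle(const Bytes16& lhs, const Bytes16& rhs,
                      const std::array<uint8_t, 16>& shuffle) {
  Bytes16 dst{};
  for (int i = 0; i < 16; ++i) {
    uint8_t idx = shuffle[i];
    dst[i] = idx < 16 ? lhs[idx] : rhs[idx - 16];
  }
  return dst;
}

int main() {
  Bytes16 lhs{}, rhs{};
  for (uint8_t i = 0; i < 16; ++i) {
    lhs[i] = i;
    rhs[i] = 100 + i;
  }
  std::array<uint8_t, 16> pattern = {0, 16, 1, 17, 2, 18, 3, 19,
                                     4, 20, 5, 21, 6, 22, 7, 23};
  Bytes16 out = i8x16_shuffle(lhs, rhs, pattern);  // interleave low bytes
  assert(out[0] == 0 && out[1] == 100 && out[2] == 1 && out[3] == 101);
}
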
@@ -1679,42 +1744,46 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_splat");
+ VU.set(kScratchReg, E8, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_splat");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_splat");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i64x2_splat");
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2.gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2.ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
@@ -1756,7 +1825,11 @@ void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_bitmask");
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
@@ -1781,92 +1854,92 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_gt_u");
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_gt_u");
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_gt_u");
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1941,32 +2014,38 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
- bailout(kSimd, "emit_s128_const");
+ WasmRvvS128const(dst.fp().toV(), imms);
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
- bailout(kSimd, "emit_s128_not");
+ VU.set(kScratchReg, E8, m1);
+ vnot_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_and");
+ VU.set(kScratchReg, E8, m1);
+ vand_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_or");
+ VU.set(kScratchReg, E8, m1);
+ vor_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_xor");
+ VU.set(kScratchReg, E8, m1);
+ vxor_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_and_not");
+ VU.set(kScratchReg, E8, m1);
+ vnot_vv(dst.fp().toV(), rhs.fp().toV());
+ vand_vv(dst.fp().toV(), lhs.fp().toV(), dst.fp().toV());
}
void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
@@ -1978,32 +2057,55 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_neg");
+ VU.set(kScratchReg, E8, m1);
+ vneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_v128_anytrue");
+ VU.set(kScratchReg, E8, m1);
+ Label t;
+ vmv_sx(kSimd128ScratchReg, zero_reg);
+ vredmaxu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beq(dst.gp(), zero_reg, &t);
+ li(dst.gp(), 1);
+ bind(&t);
}
void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_alltrue");
+ VU.set(kScratchReg, E8, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_bitmask");
+ VU.set(kScratchReg, E8, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_shl");
+ VU.set(kScratchReg, E8, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i8x16_shli");
+ DCHECK(is_uint5(rhs));
+ VU.set(kScratchReg, E8, m1);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
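
The anytrue/alltrue implementations above both reduce the vector and compare against zero: any_true uses an unsigned max-reduction (non-zero iff some lane is non-zero), all_true an unsigned min-reduction seeded with -1 (zero iff some lane is zero). Scalar reference semantics in plain C++:

#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>

using Lanes = std::array<uint8_t, 16>;

// v128.any_true: 1 if any lane is non-zero.  The RVV code gets the same
// answer from a max-reduction over the lanes.
int any_true(const Lanes& v) {
  return std::any_of(v.begin(), v.end(), [](uint8_t x) { return x != 0; });
}

// i8x16.all_true: 1 only if every lane is non-zero, i.e. the minimum lane
// value is non-zero -- which is what the min-reduction checks.
int all_true(const Lanes& v) {
  return *std::min_element(v.begin(), v.end()) != 0;
}

int main() {
  Lanes zero{};   // all lanes 0
  Lanes mixed{};
  mixed[3] = 5;   // one non-zero lane
  assert(any_true(zero) == 0 && any_true(mixed) == 1);
  assert(all_true(mixed) == 0);
}
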
@@ -2030,36 +2132,42 @@ void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add");
+ VU.set(kScratchReg, E8, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_sat_s");
+ VU.set(kScratchReg, E8, m1);
+ vsadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_sat_u");
+ VU.set(kScratchReg, E8, m1);
+ vsaddu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub");
+ VU.set(kScratchReg, E8, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_sat_s");
+ VU.set(kScratchReg, E8, m1);
+ vssub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_sat_u");
+ VU.set(kScratchReg, E8, m1);
+ vssubu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
@@ -2093,22 +2201,37 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_alltrue");
+ VU.set(kScratchReg, E16, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_bitmask");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_shl");
+ VU.set(kScratchReg, E16, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i16x8_shli");
+ DCHECK(is_uint5(rhs));
+ VU.set(kScratchReg, E16, m1);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
}
void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
@@ -2135,7 +2258,8 @@ void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_add");
+ VU.set(kScratchReg, E16, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
@@ -2152,7 +2276,8 @@ void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_sub");
+ VU.set(kScratchReg, E16, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
@@ -2203,22 +2328,36 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_alltrue");
+ VU.set(kScratchReg, E32, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_bitmask");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_shl");
+ VU.set(kScratchReg, E32, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i32x4_shli");
+ DCHECK(is_uint5(rhs));
+ VU.set(kScratchReg, E32, m1);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
}
void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
@@ -2245,12 +2384,14 @@ void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_add");
+ VU.set(kScratchReg, E32, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_sub");
+ VU.set(kScratchReg, E32, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2295,17 +2436,32 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i64x2_alltrue");
+ VU.set(kScratchReg, E64, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_shl");
+ VU.set(kScratchReg, E64, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i64x2_shli");
+ VU.set(kScratchReg, E64, m1);
+ if (is_uint5(rhs)) {
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
+ } else {
+ li(kScratchReg, rhs);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
+ }
}
void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
@@ -2332,12 +2488,14 @@ void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_add");
+ VU.set(kScratchReg, E64, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_sub");
+ VU.set(kScratchReg, E8, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2637,7 +2795,11 @@ void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_abs");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
+ vsub_vv(dst.fp().toV(), kSimd128RegZero, src.fp().toV(), Mask);
}
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
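
emit_i32x4_abs above computes the absolute value with a mask: lanes that compare less than zero are replaced by 0 - lane under the mask, the rest are copied through unchanged. A scalar reference for one vector in plain C++; the subtraction is done modulo 2^32 so INT32_MIN stays INT32_MIN, matching the wasm lane semantics:

#include <array>
#include <cassert>
#include <cstdint>

using I32x4 = std::array<int32_t, 4>;

// Lane-wise abs as mask + subtract-from-zero: negative lanes become 0 - x,
// computed in unsigned arithmetic to avoid signed overflow.
I32x4 i32x4_abs(const I32x4& src) {
  I32x4 dst = src;                      // vmv_vv(dst, src)
  for (int i = 0; i < 4; ++i) {
    if (src[i] < 0) {                   // vmslt_vv(v0, src, zero)
      dst[i] = static_cast<int32_t>(0u - static_cast<uint32_t>(src[i]));
    }
  }
  return dst;
}

int main() {
  I32x4 in = {-3, 7, INT32_MIN, 0};
  I32x4 out = i32x4_abs(in);
  assert(out[0] == 3 && out[1] == 7 && out[2] == INT32_MIN && out[3] == 0);
}
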
@@ -2667,7 +2829,9 @@ void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i32x4_extract_lane");
+ VU.set(kScratchReg, E32, m1);
+ vslidedown_vi(v31, lhs.fp().toV(), imm_lane_idx);
+ vmv_xs(dst.gp(), v31);
}
void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
@@ -2692,28 +2856,40 @@ void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i8x16_replace_lane");
+ VU.set(kScratchReg, E8, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i16x8_replace_lane");
+ VU.set(kScratchReg, E16, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i32x4_replace_lane");
+ VU.set(kScratchReg, E32, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i64x2_replace_lane");
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
@@ -2730,9 +2906,9 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
bailout(kSimd, "emit_f64x2_replace_lane");
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
bailout(kSimd, "emit_s128_set_if_nan");
}
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 722b0b074b..3db9ea0975 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -2143,81 +2143,116 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
b(condition, target); // branch if SMI
}
-#define SIMD_BINOP_LIST(V) \
- V(f64x2_add, F64x2Add) \
- V(f64x2_sub, F64x2Sub) \
- V(f64x2_mul, F64x2Mul) \
- V(f64x2_div, F64x2Div) \
- V(f64x2_min, F64x2Min) \
- V(f64x2_max, F64x2Max) \
- V(f64x2_eq, F64x2Eq) \
- V(f64x2_ne, F64x2Ne) \
- V(f64x2_lt, F64x2Lt) \
- V(f64x2_le, F64x2Le) \
- V(f32x4_add, F32x4Add) \
- V(f32x4_sub, F32x4Sub) \
- V(f32x4_mul, F32x4Mul) \
- V(f32x4_div, F32x4Div) \
- V(f32x4_min, F32x4Min) \
- V(f32x4_max, F32x4Max) \
- V(f32x4_eq, F32x4Eq) \
- V(f32x4_ne, F32x4Ne) \
- V(f32x4_lt, F32x4Lt) \
- V(f32x4_le, F32x4Le) \
- V(i64x2_add, I64x2Add) \
- V(i64x2_sub, I64x2Sub) \
- V(i64x2_mul, I64x2Mul) \
- V(i64x2_eq, I64x2Eq) \
- V(i64x2_ne, I64x2Ne) \
- V(i64x2_gt_s, I64x2GtS) \
- V(i64x2_ge_s, I64x2GeS) \
- V(i32x4_add, I32x4Add) \
- V(i32x4_sub, I32x4Sub) \
- V(i32x4_mul, I32x4Mul) \
- V(i32x4_eq, I32x4Eq) \
- V(i32x4_ne, I32x4Ne) \
- V(i32x4_gt_s, I32x4GtS) \
- V(i32x4_ge_s, I32x4GeS) \
- V(i32x4_gt_u, I32x4GtU) \
- V(i32x4_ge_u, I32x4GeU) \
- V(i32x4_min_s, I32x4MinS) \
- V(i32x4_min_u, I32x4MinU) \
- V(i32x4_max_s, I32x4MaxS) \
- V(i32x4_max_u, I32x4MaxU) \
- V(i16x8_add, I16x8Add) \
- V(i16x8_sub, I16x8Sub) \
- V(i16x8_mul, I16x8Mul) \
- V(i16x8_eq, I16x8Eq) \
- V(i16x8_ne, I16x8Ne) \
- V(i16x8_gt_s, I16x8GtS) \
- V(i16x8_ge_s, I16x8GeS) \
- V(i16x8_gt_u, I16x8GtU) \
- V(i16x8_ge_u, I16x8GeU) \
- V(i16x8_min_s, I16x8MinS) \
- V(i16x8_min_u, I16x8MinU) \
- V(i16x8_max_s, I16x8MaxS) \
- V(i16x8_max_u, I16x8MaxU) \
- V(i8x16_add, I8x16Add) \
- V(i8x16_sub, I8x16Sub) \
- V(i8x16_eq, I8x16Eq) \
- V(i8x16_ne, I8x16Ne) \
- V(i8x16_gt_s, I8x16GtS) \
- V(i8x16_ge_s, I8x16GeS) \
- V(i8x16_gt_u, I8x16GtU) \
- V(i8x16_ge_u, I8x16GeU) \
- V(i8x16_min_s, I8x16MinS) \
- V(i8x16_min_u, I8x16MinU) \
- V(i8x16_max_s, I8x16MaxS) \
- V(i8x16_max_u, I8x16MaxU)
-
-#define EMIT_SIMD_BINOP(name, op) \
+#define SIMD_BINOP_RR_LIST(V) \
+ V(f64x2_add, F64x2Add, fp) \
+ V(f64x2_sub, F64x2Sub, fp) \
+ V(f64x2_mul, F64x2Mul, fp) \
+ V(f64x2_div, F64x2Div, fp) \
+ V(f64x2_min, F64x2Min, fp) \
+ V(f64x2_max, F64x2Max, fp) \
+ V(f64x2_eq, F64x2Eq, fp) \
+ V(f64x2_ne, F64x2Ne, fp) \
+ V(f64x2_lt, F64x2Lt, fp) \
+ V(f64x2_le, F64x2Le, fp) \
+ V(f32x4_add, F32x4Add, fp) \
+ V(f32x4_sub, F32x4Sub, fp) \
+ V(f32x4_mul, F32x4Mul, fp) \
+ V(f32x4_div, F32x4Div, fp) \
+ V(f32x4_min, F32x4Min, fp) \
+ V(f32x4_max, F32x4Max, fp) \
+ V(f32x4_eq, F32x4Eq, fp) \
+ V(f32x4_ne, F32x4Ne, fp) \
+ V(f32x4_lt, F32x4Lt, fp) \
+ V(f32x4_le, F32x4Le, fp) \
+ V(i64x2_add, I64x2Add, fp) \
+ V(i64x2_sub, I64x2Sub, fp) \
+ V(i64x2_mul, I64x2Mul, fp) \
+ V(i64x2_eq, I64x2Eq, fp) \
+ V(i64x2_ne, I64x2Ne, fp) \
+ V(i64x2_gt_s, I64x2GtS, fp) \
+ V(i64x2_ge_s, I64x2GeS, fp) \
+ V(i64x2_shl, I64x2Shl, gp) \
+ V(i64x2_shr_s, I64x2ShrS, gp) \
+ V(i64x2_shr_u, I64x2ShrU, gp) \
+ V(i32x4_add, I32x4Add, fp) \
+ V(i32x4_sub, I32x4Sub, fp) \
+ V(i32x4_mul, I32x4Mul, fp) \
+ V(i32x4_eq, I32x4Eq, fp) \
+ V(i32x4_ne, I32x4Ne, fp) \
+ V(i32x4_gt_s, I32x4GtS, fp) \
+ V(i32x4_ge_s, I32x4GeS, fp) \
+ V(i32x4_gt_u, I32x4GtU, fp) \
+ V(i32x4_ge_u, I32x4GeU, fp) \
+ V(i32x4_min_s, I32x4MinS, fp) \
+ V(i32x4_min_u, I32x4MinU, fp) \
+ V(i32x4_max_s, I32x4MaxS, fp) \
+ V(i32x4_max_u, I32x4MaxU, fp) \
+ V(i32x4_shl, I32x4Shl, gp) \
+ V(i32x4_shr_s, I32x4ShrS, gp) \
+ V(i32x4_shr_u, I32x4ShrU, gp) \
+ V(i16x8_add, I16x8Add, fp) \
+ V(i16x8_sub, I16x8Sub, fp) \
+ V(i16x8_mul, I16x8Mul, fp) \
+ V(i16x8_eq, I16x8Eq, fp) \
+ V(i16x8_ne, I16x8Ne, fp) \
+ V(i16x8_gt_s, I16x8GtS, fp) \
+ V(i16x8_ge_s, I16x8GeS, fp) \
+ V(i16x8_gt_u, I16x8GtU, fp) \
+ V(i16x8_ge_u, I16x8GeU, fp) \
+ V(i16x8_min_s, I16x8MinS, fp) \
+ V(i16x8_min_u, I16x8MinU, fp) \
+ V(i16x8_max_s, I16x8MaxS, fp) \
+ V(i16x8_max_u, I16x8MaxU, fp) \
+ V(i16x8_shl, I16x8Shl, gp) \
+ V(i16x8_shr_s, I16x8ShrS, gp) \
+ V(i16x8_shr_u, I16x8ShrU, gp) \
+ V(i8x16_add, I8x16Add, fp) \
+ V(i8x16_sub, I8x16Sub, fp) \
+ V(i8x16_eq, I8x16Eq, fp) \
+ V(i8x16_ne, I8x16Ne, fp) \
+ V(i8x16_gt_s, I8x16GtS, fp) \
+ V(i8x16_ge_s, I8x16GeS, fp) \
+ V(i8x16_gt_u, I8x16GtU, fp) \
+ V(i8x16_ge_u, I8x16GeU, fp) \
+ V(i8x16_min_s, I8x16MinS, fp) \
+ V(i8x16_min_u, I8x16MinU, fp) \
+ V(i8x16_max_s, I8x16MaxS, fp) \
+ V(i8x16_max_u, I8x16MaxU, fp) \
+ V(i8x16_shl, I8x16Shl, gp) \
+ V(i8x16_shr_s, I8x16ShrS, gp) \
+ V(i8x16_shr_u, I8x16ShrU, gp)
+
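+// The third list column selects the rhs accessor: fp for vector operands,
+// gp for variable shift amounts.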
+#define EMIT_SIMD_BINOP_RR(name, op, stype) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
- op(dst.fp(), lhs.fp(), rhs.fp()); \
+ op(dst.fp(), lhs.fp(), rhs.stype()); \
}
-SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
-#undef EMIT_SIMD_BINOP
-#undef SIMD_BINOP_LIST
+SIMD_BINOP_RR_LIST(EMIT_SIMD_BINOP_RR)
+#undef EMIT_SIMD_BINOP_RR
+#undef SIMD_BINOP_RR_LIST
+
+#define SIMD_BINOP_RI_LIST(V) \
+ V(i64x2_shli, I64x2Shl) \
+ V(i64x2_shri_s, I64x2ShrS) \
+ V(i64x2_shri_u, I64x2ShrU) \
+ V(i32x4_shli, I32x4Shl) \
+ V(i32x4_shri_s, I32x4ShrS) \
+ V(i32x4_shri_u, I32x4ShrU) \
+ V(i16x8_shli, I16x8Shl) \
+ V(i16x8_shri_s, I16x8ShrS) \
+ V(i16x8_shri_u, I16x8ShrU) \
+ V(i8x16_shli, I8x16Shl) \
+ V(i8x16_shri_s, I8x16ShrS) \
+ V(i8x16_shri_u, I8x16ShrU)
+
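+// Immediate shift forms take the count as an int32_t and forward it wrapped
+// in an Operand.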
+#define EMIT_SIMD_BINOP_RI(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ int32_t rhs) { \
+ op(dst.fp(), lhs.fp(), Operand(rhs)); \
+ }
+SIMD_BINOP_RI_LIST(EMIT_SIMD_BINOP_RI)
+#undef EMIT_SIMD_BINOP_RI
+#undef SIMD_BINOP_RI_LIST
#define SIMD_UNOP_LIST(V) \
V(f64x2_splat, F64x2Splat, fp, fp) \
@@ -2424,38 +2459,6 @@ void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
bailout(kSimd, "i64x2_alltrue");
}
-void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shl");
-}
-
-void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i64x2_shli");
-}
-
-void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shr_s");
-}
-
-void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i64x2_shri_s");
-}
-
-void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shr_u");
-}
-
-void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i64x2_shri_u");
-}
-
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -2520,38 +2523,6 @@ void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
bailout(kSimd, "i32x4_bitmask");
}
-void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shl");
-}
-
-void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i32x4_shli");
-}
-
-void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shr_s");
-}
-
-void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i32x4_shri_s");
-}
-
-void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shr_u");
-}
-
-void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i32x4_shri_u");
-}
-
void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2607,38 +2578,6 @@ void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
bailout(kSimd, "i16x8_bitmask");
}
-void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shl");
-}
-
-void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i16x8_shli");
-}
-
-void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shr_s");
-}
-
-void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i16x8_shri_s");
-}
-
-void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shr_u");
-}
-
-void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i16x8_shri_u");
-}
-
void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2736,38 +2675,6 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
bailout(kSimd, "i8x16_bitmask");
}
-void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shl");
-}
-
-void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i8x16_shli");
-}
-
-void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shr_s");
-}
-
-void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i8x16_shri_s");
-}
-
-void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shr_u");
-}
-
-void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i8x16_shri_u");
-}
-
void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3134,14 +3041,40 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ Label return_nan, done;
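+  // Compare the value with itself: only a NaN compares unordered.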
+ if (kind == kF32) {
+ cebr(src, src);
+ bunordered(&return_nan);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ cdbr(src, src);
+ bunordered(&return_nan);
+ }
+ b(&done);
+ bind(&return_nan);
+ StoreF32LE(src, MemOperand(dst), r0);
+ bind(&done);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ Label return_nan, done;
+ if (lane_kind == kF32) {
+ vfce(tmp_s128.fp(), src.fp(), src.fp(), Condition(1), Condition(0),
+ Condition(2));
+ b(Condition(0x5), &return_nan); // If any or all are NaN.
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ vfce(tmp_s128.fp(), src.fp(), src.fp(), Condition(1), Condition(0),
+ Condition(3));
+ b(Condition(0x5), &return_nan);
+ }
+ b(&done);
+ bind(&return_nan);
+ StoreF32LE(src.fp(), MemOperand(dst), r0);
+ bind(&done);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index d5cda7b3c4..50032eac23 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -1317,7 +1317,9 @@ void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- if (dst.gp() == rhs.gp()) {
+ if (lhs.gp() == rhs.gp()) {
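+    // lhs - lhs is always 0; handling it first also avoids the aliasing
+    // hazard when dst == lhs == rhs.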
+ xorq(dst.gp(), dst.gp());
+ } else if (dst.gp() == rhs.gp()) {
negq(dst.gp());
addq(dst.gp(), lhs.gp());
} else {
@@ -2335,29 +2337,6 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
-template <bool is_signed>
-void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, LiftoffRegister rhs) {
- // Same algorithm as the one in code-generator-x64.cc.
- assm->Punpckhbw(kScratchDoubleReg, lhs.fp());
- assm->Punpcklbw(dst.fp(), lhs.fp());
- // Prepare shift value
- assm->movq(kScratchRegister, rhs.gp());
- // Take shift value modulo 8.
- assm->andq(kScratchRegister, Immediate(7));
- assm->addq(kScratchRegister, Immediate(8));
- assm->Movq(liftoff::kScratchDoubleReg2, kScratchRegister);
- if (is_signed) {
- assm->Psraw(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
- assm->Psraw(dst.fp(), liftoff::kScratchDoubleReg2);
- assm->Packsswb(dst.fp(), kScratchDoubleReg);
- } else {
- assm->Psrlw(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
- assm->Psrlw(dst.fp(), liftoff::kScratchDoubleReg2);
- assm->Packuswb(dst.fp(), kScratchDoubleReg);
- }
-}
-
inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src) {
assm->xorq(dst.gp(), dst.gp());
@@ -2414,21 +2393,11 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
- Pinsrb(dst.fp(), dst.fp(), src_op, 0);
- Pxor(kScratchDoubleReg, kScratchDoubleReg);
- Pshufb(dst.fp(), kScratchDoubleReg);
+ S128Load8Splat(dst.fp(), src_op, kScratchDoubleReg);
} else if (memtype == MachineType::Int16()) {
- Pinsrw(dst.fp(), dst.fp(), src_op, 0);
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Punpcklqdq(dst.fp(), dst.fp());
+ S128Load16Splat(dst.fp(), src_op, kScratchDoubleReg);
} else if (memtype == MachineType::Int32()) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vbroadcastss(dst.fp(), src_op);
- } else {
- movss(dst.fp(), src_op);
- shufps(dst.fp(), dst.fp(), byte{0});
- }
+ S128Load32Splat(dst.fp(), src_op);
} else if (memtype == MachineType::Int64()) {
Movddup(dst.fp(), src_op);
}
@@ -2440,18 +2409,17 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
Operand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm);
- *protected_load_pc = pc_offset();
MachineType mem_type = type.mem_type();
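+  // Let Pinsr{b,w,d,q} record the protected pc itself, since it may emit more
+  // than one instruction.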
if (mem_type == MachineType::Int8()) {
- Pinsrb(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrb(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else if (mem_type == MachineType::Int16()) {
- Pinsrw(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrw(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else if (mem_type == MachineType::Int32()) {
- Pinsrd(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrd(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else {
DCHECK_EQ(MachineType::Int64(), mem_type);
- Pinsrq(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrq(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
}
}
@@ -2515,26 +2483,24 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- I8x16Swizzle(dst.fp(), lhs.fp(), rhs.fp());
+ I8x16Swizzle(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- I8x16Popcnt(dst.fp(), src.fp(), liftoff::kScratchDoubleReg2);
+ I8x16Popcnt(dst.fp(), src.fp(), kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2, kScratchRegister);
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pxor(kScratchDoubleReg, kScratchDoubleReg);
- Pshufb(dst.fp(), kScratchDoubleReg);
+ I8x16Splat(dst.fp(), src.gp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pshuflw(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
- Pshufd(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
+ I16x8Splat(dst.fp(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -2927,89 +2893,37 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_simd_rc = reg_class_for(kS128);
- LiftoffRegister tmp_simd =
- GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
- // Mask off the unwanted bits before word-shifting.
- Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- movq(kScratchRegister, rhs.gp());
- andq(kScratchRegister, Immediate(7));
- addq(kScratchRegister, Immediate(8));
- Movq(tmp_simd.fp(), kScratchRegister);
- Psrlw(kScratchDoubleReg, tmp_simd.fp());
- Packuswb(kScratchDoubleReg, kScratchDoubleReg);
-
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpand(dst.fp(), lhs.fp(), kScratchDoubleReg);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- andps(dst.fp(), kScratchDoubleReg);
- }
- subq(kScratchRegister, Immediate(8));
- Movq(tmp_simd.fp(), kScratchRegister);
- Psllw(dst.fp(), tmp_simd.fp());
+ I8x16Shl(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- byte shift = static_cast<byte>(rhs & 0x7);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsllw(dst.fp(), lhs.fp(), shift);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- psllw(dst.fp(), shift);
- }
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- movl(kScratchRegister, Immediate(mask));
- Movd(kScratchDoubleReg, kScratchRegister);
- Pshufd(kScratchDoubleReg, kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), kScratchDoubleReg);
+ I8x16Shl(dst.fp(), lhs.fp(), rhs, kScratchRegister, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/true>(this, dst, lhs, rhs);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- Punpckhbw(kScratchDoubleReg, lhs.fp());
- Punpcklbw(dst.fp(), lhs.fp());
- uint8_t shift = (rhs & 7) + 8;
- Psraw(kScratchDoubleReg, shift);
- Psraw(dst.fp(), shift);
- Packsswb(dst.fp(), kScratchDoubleReg);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/false>(this, dst, lhs, rhs);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = rhs & 7; // i.InputInt3(1);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsrlw(dst.fp(), lhs.fp(), byte{shift});
- } else if (dst != lhs) {
- Movaps(dst.fp(), lhs.fp());
- psrlw(dst.fp(), byte{shift});
- }
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- movl(kScratchRegister, Immediate(mask));
- Movd(kScratchDoubleReg, kScratchRegister);
- Pshufd(kScratchDoubleReg, kScratchDoubleReg, byte{0});
- Pand(dst.fp(), kScratchDoubleReg);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs, kScratchRegister, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3220,14 +3134,13 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
LiftoffRegister src) {
- I16x8ExtAddPairwiseI8x16S(dst.fp(), src.fp());
+ I16x8ExtAddPairwiseI8x16S(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
LiftoffRegister src) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01());
- Pmaddubsw(dst.fp(), src.fp(), op);
+ I16x8ExtAddPairwiseI8x16U(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
@@ -3259,7 +3172,7 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I16x8Q15MulRSatS(dst.fp(), src1.fp(), src2.fp());
+ I16x8Q15MulRSatS(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
@@ -3376,14 +3289,12 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
LiftoffRegister src) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i16x8_splat_0x0001());
- Pmaddwd(dst.fp(), src.fp(), op);
+ I32x4ExtAddPairwiseI16x8S(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4ExtAddPairwiseI16x8U(dst.fp(), src.fp());
+ I32x4ExtAddPairwiseI16x8U(dst.fp(), src.fp(), kScratchDoubleReg);
}
namespace liftoff {
@@ -3574,28 +3485,12 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psrld(kScratchDoubleReg, static_cast<byte>(1));
- Andps(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrld(dst.fp(), static_cast<byte>(1));
- Andps(dst.fp(), src.fp());
- }
+ Absps(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Pslld(kScratchDoubleReg, byte{31});
- Xorps(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Pslld(dst.fp(), byte{31});
- Xorps(dst.fp(), src.fp());
- }
+ Negps(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
@@ -3730,28 +3625,12 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psrlq(kScratchDoubleReg, byte{1});
- Andpd(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrlq(dst.fp(), byte{1});
- Andpd(dst.fp(), src.fp());
- }
+ Abspd(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psllq(kScratchDoubleReg, static_cast<byte>(63));
- Xorpd(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psllq(dst.fp(), static_cast<byte>(63));
- Xorpd(dst.fp(), src.fp());
- }
+ Negpd(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
@@ -3842,7 +3721,7 @@ void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
LiftoffRegister src) {
- F64x2ConvertLowI32x4U(dst.fp(), src.fp());
+ F64x2ConvertLowI32x4U(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
@@ -3852,26 +3731,7 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- // NAN->0
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vcmpeqps(kScratchDoubleReg, src.fp(), src.fp());
- vpand(dst.fp(), src.fp(), kScratchDoubleReg);
- } else {
- movaps(kScratchDoubleReg, src.fp());
- cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
- if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
- andps(dst.fp(), kScratchDoubleReg);
- }
- // Set top bit if >= 0 (but not -0.0!).
- Pxor(kScratchDoubleReg, dst.fp());
- // Convert to int.
- Cvttps2dq(dst.fp(), dst.fp());
- // Set top bit if >=0 is now < 0.
- Pand(kScratchDoubleReg, dst.fp());
- Psrad(kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF.
- Pxor(dst.fp(), kScratchDoubleReg);
+ I32x4SConvertF32x4(dst.fp(), src.fp(), kScratchDoubleReg, kScratchRegister);
}
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
@@ -4012,12 +3872,14 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4TruncSatF64x2SZero(dst.fp(), src.fp());
+ I32x4TruncSatF64x2SZero(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4TruncSatF64x2UZero(dst.fp(), src.fp());
+ I32x4TruncSatF64x2UZero(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
@@ -4322,11 +4184,7 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
popq(kScratchRegister);
target = kScratchRegister;
}
- if (FLAG_untrusted_code_mitigations) {
- RetpolineCall(target);
- } else {
- call(target);
- }
+ call(target);
}
void LiftoffAssembler::TailCallIndirect(Register target) {
@@ -4334,11 +4192,7 @@ void LiftoffAssembler::TailCallIndirect(Register target) {
popq(kScratchRegister);
target = kScratchRegister;
}
- if (FLAG_untrusted_code_mitigations) {
- RetpolineJump(target);
- } else {
- jmp(target);
- }
+ jmp(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
@@ -4376,19 +4230,19 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
bind(&ret);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
if (lane_kind == kF32) {
- movaps(tmp_fp, src);
- cmpunordps(tmp_fp, tmp_fp);
+ movaps(tmp_s128.fp(), src.fp());
+ cmpunordps(tmp_s128.fp(), tmp_s128.fp());
} else {
DCHECK_EQ(lane_kind, kF64);
- movapd(tmp_fp, src);
- cmpunordpd(tmp_fp, tmp_fp);
+ movapd(tmp_s128.fp(), src.fp());
+ cmpunordpd(tmp_s128.fp(), tmp_s128.fp());
}
- pmovmskb(tmp_gp, tmp_fp);
+ pmovmskb(tmp_gp, tmp_s128.fp());
orl(Operand(dst, 0), tmp_gp);
}
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index 5a1ab579e7..a2b026eff3 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -26,6 +26,7 @@
#include <iostream>
#include "include/libplatform/libplatform.h"
+#include "include/v8-initialization.h"
#include "src/api/api-inl.h"
#include "src/base/platform/wrappers.h"
#include "src/builtins/builtins.h"
@@ -396,6 +397,11 @@ auto Engine::make(own<Config>&& config) -> own<Engine> {
if (!engine) return own<Engine>();
engine->platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(engine->platform.get());
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!v8::V8::InitializeVirtualMemoryCage()) {
+ FATAL("Could not initialize the virtual memory cage");
+ }
+#endif
v8::V8::Initialize();
return make_own(seal<Engine>(engine));
}
diff --git a/deps/v8/src/wasm/c-api.h b/deps/v8/src/wasm/c-api.h
index 0dba237d30..97a8d2d5f6 100644
--- a/deps/v8/src/wasm/c-api.h
+++ b/deps/v8/src/wasm/c-api.h
@@ -9,7 +9,8 @@
#ifndef V8_WASM_C_API_H_
#define V8_WASM_C_API_H_
-#include "include/v8.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "third_party/wasm-api/wasm.hh"
diff --git a/deps/v8/src/wasm/code-space-access.cc b/deps/v8/src/wasm/code-space-access.cc
index 0f71c9a224..83cb5ddea1 100644
--- a/deps/v8/src/wasm/code-space-access.cc
+++ b/deps/v8/src/wasm/code-space-access.cc
@@ -12,6 +12,12 @@ namespace internal {
namespace wasm {
thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
+// The thread-local counter (above) is only valid if a single thread only works
+// on one module at a time. This second thread-local checks that.
+#if defined(DEBUG) && !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+thread_local NativeModule* CodeSpaceWriteScope::current_native_module_ =
+ nullptr;
+#endif
// TODO(jkummerow): Background threads could permanently stay in
// writable mode; only the main thread has to switch back and forth.
@@ -20,6 +26,12 @@ CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule*) {
#else // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module)
: native_module_(native_module) {
+#ifdef DEBUG
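+  // The outermost scope on a thread records its module; nested scopes must
+  // target the same one.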
+ if (code_space_write_nesting_level_ == 0) {
+ current_native_module_ = native_module;
+ }
+ DCHECK_EQ(native_module, current_native_module_);
+#endif // DEBUG
#endif // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
if (code_space_write_nesting_level_ == 0) SetWritable();
code_space_write_nesting_level_++;
diff --git a/deps/v8/src/wasm/code-space-access.h b/deps/v8/src/wasm/code-space-access.h
index 96f852e63b..788bb8eca3 100644
--- a/deps/v8/src/wasm/code-space-access.h
+++ b/deps/v8/src/wasm/code-space-access.h
@@ -55,6 +55,9 @@ class V8_NODISCARD CodeSpaceWriteScope final {
private:
static thread_local int code_space_write_nesting_level_;
+#if defined(DEBUG) && !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+ static thread_local NativeModule* current_native_module_;
+#endif
void SetWritable() const;
void SetExecutable() const;
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 20c6b30ffc..618e8f013c 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -2224,6 +2224,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
int non_defaultable = 0;
for (uint32_t index = params_count; index < this->num_locals(); index++) {
if (!VALIDATE(this->enabled_.has_nn_locals() ||
+ this->enabled_.has_unsafe_nn_locals() ||
this->local_type(index).is_defaultable())) {
this->DecodeError(
"Cannot define function-level local of non-defaultable type %s",
@@ -2634,19 +2635,15 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return 0;
}
// +1 because the current try block is not included in the count.
- Control* target = control_at(imm.depth + 1);
- if (imm.depth + 1 < control_depth() - 1 && !target->is_try()) {
- this->DecodeError(
- "delegate target must be a try block or the function block");
- return 0;
- }
- if (target->is_try_catch() || target->is_try_catchall()) {
- this->DecodeError(
- "cannot delegate inside the catch handler of the target");
- return 0;
+ uint32_t target_depth = imm.depth + 1;
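+      // Walk outwards to the nearest enclosing try that has not entered a
+      // catch handler, or to the function block.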
+ while (target_depth < control_depth() - 1 &&
+ (!control_at(target_depth)->is_try() ||
+ control_at(target_depth)->is_try_catch() ||
+ control_at(target_depth)->is_try_catchall())) {
+ target_depth++;
}
FallThrough();
- CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(Delegate, imm.depth + 1, c);
+ CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(Delegate, target_depth, c);
current_catch_ = c->previous_catch;
EndControl();
PopControl();
@@ -4264,7 +4261,6 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprArrayCopy: {
NON_CONST_ONLY
- CHECK_PROTOTYPE_OPCODE(gc_experiments);
ArrayIndexImmediate<validate> dst_imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, dst_imm)) return 0;
if (!VALIDATE(dst_imm.array_type->mutability())) {
@@ -4299,7 +4295,6 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + dst_imm.length + src_imm.length;
}
case kExprArrayInit: {
- CHECK_PROTOTYPE_OPCODE(gc_experiments);
if (decoding_mode != kInitExpression) {
this->DecodeError("array.init is only allowed in init. expressions");
return 0;
@@ -4368,8 +4363,6 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprRttFreshSub:
- CHECK_PROTOTYPE_OPCODE(gc_experiments);
- V8_FALLTHROUGH;
case kExprRttSub: {
IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
"type index");
@@ -4426,6 +4419,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
CALL_INTERFACE(RefTest, obj, rtt, &value);
} else {
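+        // Both operands are unused on this path; drop the rtt and the object
+        // before pushing the constant result.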
+ CALL_INTERFACE(Drop);
+ CALL_INTERFACE(Drop);
// Unrelated types. Will always fail.
CALL_INTERFACE(I32Const, &value, 0);
}
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index cd9d941a00..e520a7d680 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -134,7 +134,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
case ExecutionTier::kTurbofan:
result = compiler::ExecuteTurbofanWasmCompilation(
- env, func_body, func_index_, counters, detected);
+ env, wire_bytes_storage, func_body, func_index_, counters, detected);
result.for_debugging = for_debugging_;
break;
}
@@ -142,30 +142,6 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
return result;
}
-namespace {
-bool must_record_function_compilation(Isolate* isolate) {
- return isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling();
-}
-
-PRINTF_FORMAT(3, 4)
-void RecordWasmHeapStubCompilation(Isolate* isolate, Handle<Code> code,
- const char* format, ...) {
- DCHECK(must_record_function_compilation(isolate));
-
- base::ScopedVector<char> buffer(128);
- va_list arguments;
- va_start(arguments, format);
- int len = base::VSNPrintF(buffer, format, arguments);
- CHECK_LT(0, len);
- va_end(arguments);
- Handle<String> name_str =
- isolate->factory()->NewStringFromAsciiChecked(buffer.begin());
- PROFILE(isolate, CodeCreateEvent(CodeEventListener::STUB_TAG,
- Handle<AbstractCode>::cast(code), name_str));
-}
-} // namespace
-
// static
void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
NativeModule* native_module,
@@ -243,17 +219,19 @@ void JSToWasmWrapperCompilationUnit::Execute() {
}
Handle<Code> JSToWasmWrapperCompilationUnit::Finalize() {
- Handle<Code> code;
if (use_generic_wrapper_) {
- code = isolate_->builtins()->code_handle(Builtin::kGenericJSToWasmWrapper);
- } else {
- CompilationJob::Status status = job_->FinalizeJob(isolate_);
- CHECK_EQ(status, CompilationJob::SUCCEEDED);
- code = job_->compilation_info()->code();
+ return isolate_->builtins()->code_handle(Builtin::kGenericJSToWasmWrapper);
}
- if (!use_generic_wrapper_ && must_record_function_compilation(isolate_)) {
- RecordWasmHeapStubCompilation(
- isolate_, code, "%s", job_->compilation_info()->GetDebugName().get());
+
+ CompilationJob::Status status = job_->FinalizeJob(isolate_);
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+ Handle<Code> code = job_->compilation_info()->code();
+ if (isolate_->logger()->is_listening_to_code_events() ||
+ isolate_->is_profiling()) {
+ Handle<String> name = isolate_->factory()->NewStringFromAsciiChecked(
+ job_->compilation_info()->GetDebugName().get());
+ PROFILE(isolate_, CodeCreateEvent(CodeEventListener::STUB_TAG,
+ Handle<AbstractCode>::cast(code), name));
}
return code;
}
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 84f34cc0ed..b8eb6b7050 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -109,9 +109,12 @@ class WasmGraphBuildingInterface {
: ControlBase(std::forward<Args>(args)...) {}
};
- explicit WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder,
- int func_index)
- : builder_(builder), func_index_(func_index) {}
+ WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder,
+ int func_index,
+ EndpointInstrumentationMode instrumentation)
+ : builder_(builder),
+ func_index_(func_index),
+ instrumentation_(instrumentation) {}
void StartFunction(FullDecoder* decoder) {
// Get the branch hints map for this function (if available)
@@ -138,7 +141,9 @@ class WasmGraphBuildingInterface {
while (index < num_locals) {
ValueType type = decoder->local_type(index);
TFNode* node;
- if (decoder->enabled_.has_nn_locals() && !type.is_defaultable()) {
+ if ((decoder->enabled_.has_nn_locals() ||
+ decoder->enabled_.has_unsafe_nn_locals()) &&
+ !type.is_defaultable()) {
DCHECK(type.is_reference());
// TODO(jkummerow): Consider using "the hole" instead, to make any
// illegal uses more obvious.
@@ -153,7 +158,9 @@ class WasmGraphBuildingInterface {
}
LoadContextIntoSsa(ssa_env);
- if (FLAG_trace_wasm) builder_->TraceFunctionEntry(decoder->position());
+ if (FLAG_trace_wasm && instrumentation_ == kInstrumentEndpoints) {
+ builder_->TraceFunctionEntry(decoder->position());
+ }
}
// Reload the instance cache entries into the Ssa Environment.
@@ -163,7 +170,11 @@ class WasmGraphBuildingInterface {
void StartFunctionBody(FullDecoder* decoder, Control* block) {}
- void FinishFunction(FullDecoder*) { builder_->PatchInStackCheckIfNeeded(); }
+ void FinishFunction(FullDecoder*) {
+ if (instrumentation_ == kInstrumentEndpoints) {
+ builder_->PatchInStackCheckIfNeeded();
+ }
+ }
void OnFirstError(FullDecoder*) {}
@@ -475,7 +486,7 @@ class WasmGraphBuildingInterface {
: decoder->stack_value(ret_count + drop_values);
GetNodes(values.begin(), stack_base, ret_count);
}
- if (FLAG_trace_wasm) {
+ if (FLAG_trace_wasm && instrumentation_ == kInstrumentEndpoints) {
builder_->TraceFunctionExit(base::VectorOf(values), decoder->position());
}
builder_->Return(base::VectorOf(values));
@@ -649,21 +660,15 @@ class WasmGraphBuildingInterface {
void CallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
- CheckForNull null_check = func_ref.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- DoCall(decoder, kCallRef, 0, null_check, func_ref.node, sig, sig_index,
- args, returns);
+ DoCall(decoder, kCallRef, 0, NullCheckFor(func_ref.type), func_ref.node,
+ sig, sig_index, args, returns);
}
void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index,
const Value args[]) {
- CheckForNull null_check = func_ref.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- DoReturnCall(decoder, kCallRef, 0, null_check, func_ref, sig, sig_index,
- args);
+ DoReturnCall(decoder, kCallRef, 0, NullCheckFor(func_ref.type), func_ref,
+ sig, sig_index, args);
}
void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
@@ -922,23 +927,17 @@ class WasmGraphBuildingInterface {
void StructGet(FullDecoder* decoder, const Value& struct_object,
const FieldImmediate<validate>& field, bool is_signed,
Value* result) {
- CheckForNull null_check = struct_object.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
result->node = builder_->StructGet(
struct_object.node, field.struct_imm.struct_type, field.field_imm.index,
- null_check, is_signed, decoder->position());
+ NullCheckFor(struct_object.type), is_signed, decoder->position());
}
void StructSet(FullDecoder* decoder, const Value& struct_object,
const FieldImmediate<validate>& field,
const Value& field_value) {
- CheckForNull null_check = struct_object.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
builder_->StructSet(struct_object.node, field.struct_imm.struct_type,
- field.field_imm.index, field_value.node, null_check,
- decoder->position());
+ field.field_imm.index, field_value.node,
+ NullCheckFor(struct_object.type), decoder->position());
}
void ArrayNewWithRtt(FullDecoder* decoder,
@@ -967,36 +966,28 @@ class WasmGraphBuildingInterface {
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
const ArrayIndexImmediate<validate>& imm, const Value& index,
bool is_signed, Value* result) {
- CheckForNull null_check = array_obj.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- result->node =
- builder_->ArrayGet(array_obj.node, imm.array_type, index.node,
- null_check, is_signed, decoder->position());
+ result->node = builder_->ArrayGet(array_obj.node, imm.array_type,
+ index.node, NullCheckFor(array_obj.type),
+ is_signed, decoder->position());
}
void ArraySet(FullDecoder* decoder, const Value& array_obj,
const ArrayIndexImmediate<validate>& imm, const Value& index,
const Value& value) {
- CheckForNull null_check = array_obj.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
builder_->ArraySet(array_obj.node, imm.array_type, index.node, value.node,
- null_check, decoder->position());
+ NullCheckFor(array_obj.type), decoder->position());
}
void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
- CheckForNull null_check = array_obj.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- result->node =
- builder_->ArrayLen(array_obj.node, null_check, decoder->position());
+ result->node = builder_->ArrayLen(
+ array_obj.node, NullCheckFor(array_obj.type), decoder->position());
}
void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
const Value& src, const Value& src_index,
const Value& length) {
- builder_->ArrayCopy(dst.node, dst_index.node, src.node, src_index.node,
+ builder_->ArrayCopy(dst.node, dst_index.node, NullCheckFor(dst.type),
+ src.node, src_index.node, NullCheckFor(src.type),
length.node, decoder->position());
}
@@ -1177,6 +1168,7 @@ class WasmGraphBuildingInterface {
const BranchHintMap* branch_hints_ = nullptr;
// Tracks loop data for loop unrolling.
std::vector<compiler::WasmLoopInfo> loop_infos_;
+ EndpointInstrumentationMode instrumentation_;
TFNode* effect() { return builder_->effect(); }
@@ -1547,7 +1539,6 @@ class WasmGraphBuildingInterface {
WRAP_CACHE_FIELD(mem_start);
WRAP_CACHE_FIELD(mem_size);
- WRAP_CACHE_FIELD(mem_mask);
#undef WRAP_CACHE_FIELD
}
}
@@ -1597,6 +1588,12 @@ class WasmGraphBuildingInterface {
builder_->TerminateThrow(effect(), control());
}
}
+
+ CheckForNull NullCheckFor(ValueType type) {
+ DCHECK(type.is_object_reference());
+ return type.is_nullable() ? CheckForNull::kWithNullCheck
+ : CheckForNull::kWithoutNullCheck;
+ }
};
} // namespace
@@ -1607,10 +1604,12 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
WasmFeatures* detected, const FunctionBody& body,
std::vector<compiler::WasmLoopInfo>* loop_infos,
compiler::NodeOriginTable* node_origins,
- int func_index) {
+ int func_index,
+ EndpointInstrumentationMode instrumentation) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kFullValidation, WasmGraphBuildingInterface> decoder(
- &zone, module, enabled, detected, body, builder, func_index);
+ &zone, module, enabled, detected, body, builder, func_index,
+ instrumentation);
if (node_origins) {
builder->AddBytecodePositionDecorator(node_origins, &decoder);
}
diff --git a/deps/v8/src/wasm/graph-builder-interface.h b/deps/v8/src/wasm/graph-builder-interface.h
index 6c668e2b0a..c264bc8330 100644
--- a/deps/v8/src/wasm/graph-builder-interface.h
+++ b/deps/v8/src/wasm/graph-builder-interface.h
@@ -27,12 +27,18 @@ struct FunctionBody;
class WasmFeatures;
struct WasmModule;
+enum EndpointInstrumentationMode {
+ kDoNotInstrumentEndpoints,
+ kInstrumentEndpoints
+};
+
V8_EXPORT_PRIVATE DecodeResult
BuildTFGraph(AccountingAllocator* allocator, const WasmFeatures& enabled,
const WasmModule* module, compiler::WasmGraphBuilder* builder,
WasmFeatures* detected, const FunctionBody& body,
std::vector<compiler::WasmLoopInfo>* loop_infos,
- compiler::NodeOriginTable* node_origins, int func_index);
+ compiler::NodeOriginTable* node_origins, int func_index,
+ EndpointInstrumentationMode instrumentation);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index db2514791b..4dc808fe33 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -268,6 +268,36 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
+#elif V8_TARGET_ARCH_LOONG64
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+ Address lazy_compile_target) {
+ DCHECK(is_int32(func_index));
+ int start = pc_offset();
+ li(kWasmCompileLazyFuncIndexRegister, (int32_t)func_index); // max. 2 instr
+ // Jump produces max 4 instructions.
+ Jump(lazy_compile_target, RelocInfo::NONE);
+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
+ DCHECK_EQ(nop_bytes % kInstrSize, 0);
+ for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
+}
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
+ PatchAndJump(target);
+ return true;
+}
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ JumpToInstructionStream(target);
+}
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
+}
+void JumpTableAssembler::NopBytes(int bytes) {
+ DCHECK_LE(0, bytes);
+ DCHECK_EQ(0, bytes % kInstrSize);
+ for (; bytes > 0; bytes -= kInstrSize) {
+ nop();
+ }
+}
+
#elif V8_TARGET_ARCH_PPC64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index 3963de9824..433608decb 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -224,6 +224,11 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 10 * kInstrSize;
+#elif V8_TARGET_ARCH_LOONG64
+ static constexpr int kJumpTableLineSize = 8 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize;
#else
#error Unknown architecture.
#endif
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index ea714cbe4c..2d66102c1f 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -1646,12 +1646,8 @@ void CompileNativeModule(Isolate* isolate,
return;
}
- if (!FLAG_predictable) {
- // For predictable mode, do not finalize wrappers yet to make sure we catch
- // validation errors first.
- compilation_state->FinalizeJSToWasmWrappers(
- isolate, native_module->module(), export_wrappers_out);
- }
+ compilation_state->FinalizeJSToWasmWrappers(isolate, native_module->module(),
+ export_wrappers_out);
compilation_state->WaitForCompilationEvent(
CompilationEvent::kFinishedBaselineCompilation);
@@ -1663,9 +1659,6 @@ void CompileNativeModule(Isolate* isolate,
ValidateSequentially(wasm_module, native_module.get(), isolate->counters(),
isolate->allocator(), thrower, lazy_module);
CHECK(thrower->error());
- } else if (FLAG_predictable) {
- compilation_state->FinalizeJSToWasmWrappers(
- isolate, native_module->module(), export_wrappers_out);
}
}
@@ -3052,13 +3045,13 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
}
compilation_progress_.assign(module->num_declared_functions,
kProgressAfterDeserialization);
- uint32_t num_imported_functions = module->num_imported_functions;
for (auto func_index : missing_functions) {
if (FLAG_wasm_lazy_compilation) {
- native_module_->UseLazyStub(num_imported_functions + func_index);
+ native_module_->UseLazyStub(func_index);
}
- compilation_progress_[func_index] = SetupCompilationProgressForFunction(
- lazy_module, module, enabled_features, func_index);
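+      // func_index is an absolute function index; the progress table is
+      // indexed by declared functions only.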
+ compilation_progress_[declared_function_index(module, func_index)] =
+ SetupCompilationProgressForFunction(lazy_module, module,
+ enabled_features, func_index);
}
}
auto builder = std::make_unique<CompilationUnitBuilder>(native_module_);
@@ -3665,13 +3658,17 @@ WasmCode* CompileImportWrapper(
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
&env, kind, sig, source_positions, expected_arity);
- std::unique_ptr<WasmCode> wasm_code = native_module->AddCode(
- result.func_index, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots,
- result.protected_instructions_data.as_vector(),
- result.source_positions.as_vector(), GetCodeKind(result),
- ExecutionTier::kNone, kNoDebugging);
- WasmCode* published_code = native_module->PublishCode(std::move(wasm_code));
+ WasmCode* published_code;
+ {
+ CodeSpaceWriteScope code_space_write_scope(native_module);
+ std::unique_ptr<WasmCode> wasm_code = native_module->AddCode(
+ result.func_index, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), GetCodeKind(result),
+ ExecutionTier::kNone, kNoDebugging);
+ published_code = native_module->PublishCode(std::move(wasm_code));
+ }
(*cache_scope)[key] = published_code;
published_code->IncRef();
counters->wasm_generated_code_size()->Increment(
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index e8bd2597bc..16ac753547 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -44,9 +44,11 @@ class CompilationResultResolver;
class ErrorThrower;
class ModuleCompiler;
class NativeModule;
+class StreamingDecoder;
class WasmCode;
struct WasmModule;
+V8_EXPORT_PRIVATE
std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index b014f8a8c7..d2c78f0da5 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -563,9 +563,10 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kWasmFunctionExtendingTypeCode: {
- if (!enabled_features_.has_gc_experiments()) {
+ if (!enabled_features_.has_gc()) {
errorf(pc(),
- "nominal types need --experimental-wasm-gc-experiments");
+ "invalid function type definition, enable with "
+ "--experimental-wasm-gc");
break;
}
const FunctionSig* s = consume_sig(module_->signature_zone.get());
@@ -591,9 +592,10 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kWasmStructExtendingTypeCode: {
- if (!enabled_features_.has_gc_experiments()) {
+ if (!enabled_features_.has_gc()) {
errorf(pc(),
- "nominal types need --experimental-wasm-gc-experiments");
+ "invalid struct type definition, enable with "
+ "--experimental-wasm-gc");
break;
}
const StructType* s = consume_struct(module_->signature_zone.get());
@@ -617,9 +619,10 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kWasmArrayExtendingTypeCode: {
- if (!enabled_features_.has_gc_experiments()) {
+ if (!enabled_features_.has_gc()) {
errorf(pc(),
- "nominal types need --experimental-wasm-gc-experiments");
+ "invalid array type definition, enable with "
+ "--experimental-wasm-gc");
break;
}
const ArrayType* type = consume_array(module_->signature_zone.get());
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index f56ab55cd7..1040f77ecd 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -65,9 +65,10 @@ class CompileImportWrapperJob final : public JobTask {
}
void Run(JobDelegate* delegate) override {
- CodeSpaceWriteScope code_space_write_scope(native_module_);
while (base::Optional<WasmImportWrapperCache::CacheKey> key =
queue_->pop()) {
+ // TODO(wasm): Batch code publishing, to avoid repeated locking and
+ // permission switching.
CompileImportWrapper(native_module_, counters_, key->kind, key->signature,
key->expected_arity, cache_scope_);
if (delegate->ShouldYield()) return;
@@ -162,6 +163,7 @@ Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
map->SetInstanceDescriptors(isolate, *descriptors,
descriptors->number_of_descriptors());
map->set_is_extensible(false);
+ WasmStruct::EncodeInstanceSizeInMap(real_instance_size, *map);
return map;
}
@@ -187,6 +189,8 @@ Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
map->SetInstanceDescriptors(isolate, *descriptors,
descriptors->number_of_descriptors());
map->set_is_extensible(false);
+ WasmArray::EncodeElementSizeInMap(type->element_type().element_size_bytes(),
+ *map);
return map;
}
@@ -1035,7 +1039,8 @@ bool InstanceBuilder::ProcessImportedFunction(
if (kind == compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(js_receiver);
SharedFunctionInfo shared = function->shared();
- expected_arity = shared.internal_formal_parameter_count();
+ expected_arity =
+ shared.internal_formal_parameter_count_without_receiver();
}
NativeModule* native_module = instance->module_object().native_module();
@@ -1439,7 +1444,8 @@ void InstanceBuilder::CompileImportWrappers(
compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(resolved.second);
SharedFunctionInfo shared = function->shared();
- expected_arity = shared.internal_formal_parameter_count();
+ expected_arity =
+ shared.internal_formal_parameter_count_without_receiver();
}
WasmImportWrapperCache::CacheKey key(kind, sig, expected_arity);
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index d080d1285e..0c8a570c71 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -267,14 +267,17 @@ void WasmCode::LogCode(Isolate* isolate, const char* source_url,
"wasm-function[%d]", index()));
name = base::VectorOf(name_buffer);
}
- int code_offset = module->functions[index_].code.offset();
- PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this, name,
- source_url, code_offset, script_id));
+ // Record source positions before adding code, otherwise when code is added,
+ // there are no source positions to associate with the added code.
if (!source_positions().empty()) {
- LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
- source_positions()));
+ LOG_CODE_EVENT(isolate, WasmCodeLinePosInfoRecordEvent(instruction_start(),
+ source_positions()));
}
+
+ int code_offset = module->functions[index_].code.offset();
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this, name,
+ source_url, code_offset, script_id));
}
void WasmCode::Validate() const {
@@ -664,12 +667,13 @@ class CheckWritableMemoryRegions {
DCHECK(std::none_of(writable_memory_.begin(), writable_memory_.end(),
[](auto region) { return region.is_empty(); }));
- // Regions are sorted and disjoint.
- std::accumulate(writable_memory_.begin(), writable_memory_.end(),
- Address{0}, [](Address previous_end, auto region) {
- DCHECK_LT(previous_end, region.begin());
- return region.end();
- });
+ // Regions are sorted and disjoint. (std::accumulate has nodiscard on msvc
+ // so USE is required to prevent build failures in debug builds).
+ USE(std::accumulate(writable_memory_.begin(), writable_memory_.end(),
+ Address{0}, [](Address previous_end, auto region) {
+ DCHECK_LT(previous_end, region.begin());
+ return region.end();
+ }));
}
private:
@@ -1032,12 +1036,9 @@ void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
// Log all owned code, not just the current entries in the code table. This
// will also include import wrappers.
- base::RecursiveMutexGuard lock(&allocation_mutex_);
- for (auto& owned_entry : owned_code_) {
- owned_entry.second->LogCode(isolate, source_url.get(), script.id());
- }
- for (auto& owned_entry : new_owned_code_) {
- owned_entry->LogCode(isolate, source_url.get(), script.id());
+ WasmCodeRefScope code_ref_scope;
+ for (auto& code : SnapshotAllOwnedCode()) {
+ code->LogCode(isolate, source_url.get(), script.id());
}
}
@@ -1179,7 +1180,6 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
ExecutionTier tier, ForDebugging for_debugging) {
base::Vector<byte> code_space;
NativeModule::JumpTablesRef jump_table_ref;
- CodeSpaceWriteScope code_space_write_scope(this);
{
base::RecursiveMutexGuard guard{&allocation_mutex_};
code_space = code_allocator_.AllocateForCode(this, desc.instr_size);
@@ -1429,6 +1429,17 @@ std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
return std::vector<WasmCode*>{start, end};
}
+std::vector<WasmCode*> NativeModule::SnapshotAllOwnedCode() const {
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
+ if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
+
+ std::vector<WasmCode*> all_code(owned_code_.size());
+ std::transform(owned_code_.begin(), owned_code_.end(), all_code.begin(),
+ [](auto& entry) { return entry.second.get(); });
+ std::for_each(all_code.begin(), all_code.end(), WasmCodeRefScope::AddRef);
+ return all_code;
+}
+
WasmCode* NativeModule::GetCode(uint32_t index) const {
base::RecursiveMutexGuard guard(&allocation_mutex_);
WasmCode* code = code_table_[declared_function_index(module(), index)];
@@ -2113,6 +2124,12 @@ bool WasmCodeManager::HasMemoryProtectionKeySupport() const {
return memory_protection_key_ != kNoMemoryProtectionKey;
}
+void WasmCodeManager::InitializeMemoryProtectionKeyForTesting() {
+ if (memory_protection_key_ == kNoMemoryProtectionKey) {
+ memory_protection_key_ = AllocateMemoryProtectionKey();
+ }
+}
+
std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
std::shared_ptr<const WasmModule> module) {
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 2baf46e888..70ef6d75a9 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -102,6 +102,14 @@ struct WasmModule;
IF_TSAN(V, TSANRelaxedStore32SaveFP) \
IF_TSAN(V, TSANRelaxedStore64IgnoreFP) \
IF_TSAN(V, TSANRelaxedStore64SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore8IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore8SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore16IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore16SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore32IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore32SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore64IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore64SaveFP) \
IF_TSAN(V, TSANRelaxedLoad32IgnoreFP) \
IF_TSAN(V, TSANRelaxedLoad32SaveFP) \
IF_TSAN(V, TSANRelaxedLoad64IgnoreFP) \
@@ -109,7 +117,6 @@ struct WasmModule;
V(WasmAllocateArray_Uninitialized) \
V(WasmAllocateArray_InitNull) \
V(WasmAllocateArray_InitZero) \
- V(WasmArrayCopy) \
V(WasmArrayCopyWithChecks) \
V(WasmAllocateRtt) \
V(WasmAllocateFreshRtt) \
@@ -188,25 +195,47 @@ class V8_EXPORT_PRIVATE WasmCode final {
}
#ifdef V8_IS_TSAN
- static RuntimeStubId GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode,
- int size) {
- if (size == kInt8Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore8SaveFP;
- } else if (size == kInt16Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore16SaveFP;
- } else if (size == kInt32Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore32SaveFP;
+ static RuntimeStubId GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+ std::memory_order order) {
+ if (order == std::memory_order_relaxed) {
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore64SaveFP;
+ }
} else {
- CHECK_EQ(size, kInt64Size);
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore64SaveFP;
+ DCHECK_EQ(order, std::memory_order_seq_cst);
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore8IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore16IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore32IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore64IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore64SaveFP;
+ }
}
}
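As a minimal usage sketch (TSAN builds only; the call site itself is hypothetical), the new {order} parameter selects between the relaxed and seq_cst stub families listed above:

    // Hypothetical call site; the stub ids come from the list added above.
    WasmCode::RuntimeStubId stub = WasmCode::GetTSANStoreStub(
        SaveFPRegsMode::kIgnore, kInt32Size, std::memory_order_seq_cst);
    // Here {stub} is RuntimeStubId::kTSANSeqCstStore32IgnoreFP; passing
    // std::memory_order_relaxed instead selects kTSANRelaxedStore32IgnoreFP.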
@@ -520,7 +549,7 @@ class WasmCodeAllocator {
  // Make a code region writable. Only allowed if there is at least one writer
// (see above).
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
- void MakeWritable(base::AddressRegion);
+ V8_EXPORT_PRIVATE void MakeWritable(base::AddressRegion);
// Free memory pages of all given code objects. Used for wasm code GC.
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
@@ -637,6 +666,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Creates a snapshot of the current state of the code table. This is useful
// to get a consistent view of the table (e.g. used by the serializer).
std::vector<WasmCode*> SnapshotCodeTable() const;
+  // Creates a snapshot of all {owned_code_}; this first transfers any new
+  // code into {owned_code_}.
+ std::vector<WasmCode*> SnapshotAllOwnedCode() const;
WasmCode* GetCode(uint32_t index) const;
bool HasCode(uint32_t index) const;
@@ -1006,6 +1038,10 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// Returns true if there is PKU support, false otherwise.
bool HasMemoryProtectionKeySupport() const;
+ // This allocates a memory protection key (if none was allocated before),
+ // independent of the --wasm-memory-protection-keys flag.
+ void InitializeMemoryProtectionKeyForTesting();
+
private:
friend class WasmCodeAllocator;
friend class WasmEngine;
@@ -1033,7 +1069,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// and updated after each GC.
std::atomic<size_t> critical_committed_code_space_;
- const int memory_protection_key_;
+ int memory_protection_key_;
mutable base::Mutex native_modules_mutex_;
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 7209096911..5cf61ef543 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -45,6 +45,7 @@ class GdbServer;
class AsyncCompileJob;
class ErrorThrower;
struct ModuleWireBytes;
+class StreamingDecoder;
class WasmFeatures;
class V8_EXPORT_PRIVATE CompilationResultResolver {
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 101d563876..6fc3278141 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -451,7 +451,6 @@ class V8_NODISCARD ThreadNotInWasmScope {
#endif
};
-#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
inline byte* EffectiveAddress(WasmInstanceObject instance, uint32_t index) {
return instance.memory_start() + index;
}
@@ -460,19 +459,6 @@ inline byte* EffectiveAddress(byte* base, size_t size, uint32_t index) {
return base + index;
}
-#else
-inline byte* EffectiveAddress(WasmInstanceObject instance, uint32_t index) {
- // Compute the effective address of the access, making sure to condition
- // the index even in the in-bounds case.
- return instance.memory_start() + (index & instance.memory_mask());
-}
-
-inline byte* EffectiveAddress(byte* base, size_t size, uint32_t index) {
- size_t mem_mask = base::bits::RoundUpToPowerOfTwo(size) - 1;
- return base + (index & mem_mask);
-}
-#endif
-
template <typename V>
V ReadAndIncrementOffset(Address data, size_t* offset) {
V result = ReadUnalignedValue<V>(data + *offset);
@@ -551,6 +537,53 @@ int32_t memory_fill_wrapper(Address data) {
return kSuccess;
}
+namespace {
+inline void* ArrayElementAddress(WasmArray array, uint32_t index,
+ int element_size_bytes) {
+ return reinterpret_cast<void*>(array.ptr() + WasmArray::kHeaderSize -
+ kHeapObjectTag + index * element_size_bytes);
+}
+} // namespace
+
+void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
+ uint32_t dst_index, Address raw_src_array,
+ uint32_t src_index, uint32_t length) {
+ ThreadNotInWasmScope thread_not_in_wasm_scope;
+ DisallowGarbageCollection no_gc;
+ WasmArray dst_array = WasmArray::cast(Object(raw_dst_array));
+ WasmArray src_array = WasmArray::cast(Object(raw_src_array));
+
+ bool overlapping_ranges =
+ dst_array.ptr() == src_array.ptr() &&
+ (dst_index < src_index ? dst_index + length > src_index
+ : src_index + length > dst_index);
+ wasm::ValueType element_type = src_array.type()->element_type();
+ if (element_type.is_reference()) {
+ WasmInstanceObject instance =
+ WasmInstanceObject::cast(Object(raw_instance));
+ Isolate* isolate = Isolate::FromRootAddress(instance.isolate_root());
+ ObjectSlot dst_slot = dst_array.ElementSlot(dst_index);
+ ObjectSlot src_slot = src_array.ElementSlot(src_index);
+ if (overlapping_ranges) {
+ isolate->heap()->MoveRange(dst_array, dst_slot, src_slot, length,
+ UPDATE_WRITE_BARRIER);
+ } else {
+ isolate->heap()->CopyRange(dst_array, dst_slot, src_slot, length,
+ UPDATE_WRITE_BARRIER);
+ }
+ } else {
+ int element_size_bytes = element_type.element_size_bytes();
+ void* dst = ArrayElementAddress(dst_array, dst_index, element_size_bytes);
+ void* src = ArrayElementAddress(src_array, src_index, element_size_bytes);
+ size_t copy_size = length * element_size_bytes;
+ if (overlapping_ranges) {
+ MemMove(dst, src, copy_size);
+ } else {
+ MemCopy(dst, src, copy_size);
+ }
+ }
+}
+
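A small worked example of the {overlapping_ranges} test above; the indices are invented and assume both handles refer to the same array:

    uint32_t dst_index = 2, src_index = 0, length = 3;
    bool overlapping = dst_index < src_index ? dst_index + length > src_index
                                             : src_index + length > dst_index;
    // overlapping == true here (0 + 3 > 2), so the wrapper takes the
    // MoveRange / MemMove path; with src_index = 5 the ranges would be
    // disjoint and the cheaper CopyRange / MemCopy path would be used.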
static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback) {
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index e8363d5936..3365e109fb 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -111,6 +111,10 @@ int32_t memory_copy_wrapper(Address data);
// zero-extend the result in the return register.
int32_t memory_fill_wrapper(Address data);
+void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
+ uint32_t dst_index, Address raw_src_array,
+ uint32_t src_index, uint32_t length);
+
using WasmTrapCallbackForTesting = void (*)();
V8_EXPORT_PRIVATE void set_trap_callback_for_testing(
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index 1c4c2acaec..ac8e8e16d7 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -26,8 +26,12 @@
\
/* Non-specified, V8-only experimental additions to the GC proposal */ \
/* V8 side owner: jkummerow */ \
- V(gc_experiments, "garbage collection V8-only experimental features", false) \
- V(nn_locals, "allow non-defaultable/non-nullable locals", false) \
+ V(nn_locals, \
+ "allow non-defaultable/non-nullable locals, validated with 'until end of " \
+ "block' semantics", \
+ false) \
+ V(unsafe_nn_locals, \
+ "allow non-defaultable/non-nullable locals, no validation", false) \
\
/* Typed function references proposal. */ \
/* Official proposal: https://github.com/WebAssembly/function-references */ \
@@ -47,7 +51,12 @@
/* Branch Hinting proposal. */ \
/* https://github.com/WebAssembly/branch-hinting */ \
/* V8 side owner: jkummerow */ \
- V(branch_hinting, "branch hinting", false)
+ V(branch_hinting, "branch hinting", false) \
+ \
+ /* Stack Switching proposal. */ \
+ /* https://github.com/WebAssembly/stack-switching */ \
+ /* V8 side owner: thibaudm & fgm */ \
+ V(stack_switching, "stack switching", false)
// #############################################################################
// Staged features (disabled by default, but enabled via --wasm-staging (also
@@ -58,12 +67,6 @@
// be shipped with enough lead time to the next branch to allow for
// stabilization.
#define FOREACH_WASM_STAGING_FEATURE_FLAG(V) /* (force 80 columns) */ \
- /* Exception handling proposal. */ \
- /* https://github.com/WebAssembly/exception-handling */ \
- /* V8 side owner: thibaudm */ \
- /* Staged in v8.9 */ \
- V(eh, "exception handling opcodes", false) \
- \
/* Reference Types, a.k.a. reftypes proposal. */ \
/* https://github.com/WebAssembly/reference-types */ \
/* V8 side owner: ahaas */ \
@@ -104,6 +107,13 @@
/* V8 side owner: gdeepti */ \
V(threads, "thread opcodes", true) \
\
+ /* Exception handling proposal. */ \
+ /* https://github.com/WebAssembly/exception-handling */ \
+ /* V8 side owner: thibaudm */ \
+ /* Staged in v8.9 */ \
+ /* Shipped in v9.5 */ \
+ V(eh, "exception handling opcodes", true) \
+ \
// Combination of all available wasm feature flags.
#define FOREACH_WASM_FEATURE_FLAG(V) \
FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(V) \
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index b65db60154..ef514c3b4c 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -7,6 +7,8 @@
#include <cinttypes>
#include <cstring>
+#include "include/v8-function.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/api/api-natives.h"
#include "src/ast/ast.h"
@@ -1115,12 +1117,25 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
i::Handle<i::FixedArray> fixed_array;
- i::Handle<i::JSObject> table_obj =
+ i::Handle<i::WasmTableObject> table_obj =
i::WasmTableObject::New(i_isolate, i::Handle<i::WasmInstanceObject>(),
type, static_cast<uint32_t>(initial), has_maximum,
static_cast<uint32_t>(maximum), &fixed_array);
+
+ if (initial > 0 && args.Length() >= 2 && !args[1]->IsUndefined()) {
+ i::Handle<i::Object> element = Utils::OpenHandle(*args[1]);
+ if (!i::WasmTableObject::IsValidElement(i_isolate, table_obj, element)) {
+ thrower.TypeError(
+ "Argument 2 must be undefined, null, or a value of type compatible "
+ "with the type of the new table.");
+ return;
+ }
+ for (uint32_t index = 0; index < static_cast<uint32_t>(initial); ++index) {
+ i::WasmTableObject::Set(i_isolate, table_obj, index, element);
+ }
+ }
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- return_value.Set(Utils::ToLocal(table_obj));
+ return_value.Set(Utils::ToLocal(i::Handle<i::JSObject>::cast(table_obj)));
}
void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -1578,7 +1593,6 @@ void EncodeExceptionValues(v8::Isolate* isolate,
case i::wasm::kBottom:
case i::wasm::kS128:
UNREACHABLE();
- break;
}
}
}
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index b7806af797..fcafb69395 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -58,9 +58,6 @@ constexpr size_t kV8MaxWasmMemories = 1;
// GC proposal. These limits are not standardized yet.
constexpr size_t kV8MaxWasmStructFields = 999;
constexpr uint32_t kV8MaxRttSubtypingDepth = 31;
-// Maximum supported by implementation: ((1<<27)-3).
-// Reason: total object size in bytes must fit into a Smi, for filler objects.
-constexpr size_t kV8MaxWasmArrayLength = 1u << 26;
constexpr size_t kV8MaxWasmArrayInitLength = 999;
static_assert(kV8MaxWasmTableSize <= 4294967295, // 2^32 - 1
diff --git a/deps/v8/src/wasm/wasm-linkage.h b/deps/v8/src/wasm/wasm-linkage.h
index 2d98055519..ecf59f9ed5 100644
--- a/deps/v8/src/wasm/wasm-linkage.h
+++ b/deps/v8/src/wasm/wasm-linkage.h
@@ -80,6 +80,15 @@ constexpr Register kGpReturnRegisters[] = {v0, v1};
constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14};
constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4};
+#elif V8_TARGET_ARCH_LOONG64
+// ===========================================================================
+// == LOONG64 ================================================================
+// ===========================================================================
+constexpr Register kGpParamRegisters[] = {a0, a2, a3, a4, a5, a6, a7};
+constexpr Register kGpReturnRegisters[] = {a0, a1};
+constexpr DoubleRegister kFpParamRegisters[] = {f0, f1, f2, f3, f4, f5, f6, f7};
+constexpr DoubleRegister kFpReturnRegisters[] = {f0, f1};
+
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
// ===========================================================================
// == ppc & ppc64 ============================================================
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 2bf20ea3ec..756900c160 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -264,7 +264,7 @@ WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
functions_(zone),
tables_(zone),
data_segments_(zone),
- indirect_functions_(zone),
+ element_segments_(zone),
globals_(zone),
exceptions_(zone),
signature_map_(zone),
@@ -323,75 +323,52 @@ uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type) {
const uint32_t WasmModuleBuilder::kNullIndex =
std::numeric_limits<uint32_t>::max();
-// TODO(9495): Add support for typed function tables and more init. expressions.
-uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
- DCHECK(allocating_indirect_functions_allowed_);
- uint32_t index = static_cast<uint32_t>(indirect_functions_.size());
- DCHECK_GE(FLAG_wasm_max_table_size, index);
- if (count > FLAG_wasm_max_table_size - index) {
+uint32_t WasmModuleBuilder::IncreaseTableMinSize(uint32_t table_index,
+ uint32_t count) {
+ DCHECK_LT(table_index, tables_.size());
+ uint32_t old_min_size = tables_[table_index].min_size;
+ if (count > FLAG_wasm_max_table_size - old_min_size) {
return std::numeric_limits<uint32_t>::max();
}
- uint32_t new_size = static_cast<uint32_t>(indirect_functions_.size()) + count;
- DCHECK(max_table_size_ == 0 || new_size <= max_table_size_);
- indirect_functions_.resize(new_size, kNullIndex);
- uint32_t max = max_table_size_ > 0 ? max_table_size_ : new_size;
- if (tables_.empty()) {
- // This cannot use {AddTable} because that would flip the
- // {allocating_indirect_functions_allowed_} flag.
- tables_.push_back({kWasmFuncRef, new_size, max, true, {}});
- } else {
- // There can only be the indirect function table so far, otherwise the
- // {allocating_indirect_functions_allowed_} flag would have been false.
- DCHECK_EQ(1u, tables_.size());
- DCHECK_EQ(kWasmFuncRef, tables_[0].type);
- DCHECK(tables_[0].has_maximum);
- tables_[0].min_size = new_size;
- tables_[0].max_size = max;
- }
- return index;
-}
-
-void WasmModuleBuilder::SetIndirectFunction(uint32_t indirect,
- uint32_t direct) {
- indirect_functions_[indirect] = direct;
-}
-
-void WasmModuleBuilder::SetMaxTableSize(uint32_t max) {
- DCHECK_GE(FLAG_wasm_max_table_size, max);
- DCHECK_GE(max, indirect_functions_.size());
- max_table_size_ = max;
- DCHECK(allocating_indirect_functions_allowed_);
- if (!tables_.empty()) {
- tables_[0].max_size = max;
- }
+ tables_[table_index].min_size = old_min_size + count;
+ tables_[table_index].max_size =
+ std::max(old_min_size + count, tables_[table_index].max_size);
+ return old_min_size;
}
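A hypothetical caller of the new helper ({builder} stands for some WasmModuleBuilder*); the sentinel is the same std::numeric_limits<uint32_t>::max() returned above:

    // Reserve three more slots in table 0 and remember where they start.
    uint32_t base =
        builder->IncreaseTableMinSize(/*table_index=*/0, /*count=*/3);
    if (base == std::numeric_limits<uint32_t>::max()) {
      // FLAG_wasm_max_table_size would be exceeded; the caller must bail out.
    }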
uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size) {
-#if DEBUG
- allocating_indirect_functions_allowed_ = false;
-#endif
tables_.push_back({type, min_size, 0, false, {}});
return static_cast<uint32_t>(tables_.size() - 1);
}
uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
uint32_t max_size) {
-#if DEBUG
- allocating_indirect_functions_allowed_ = false;
-#endif
tables_.push_back({type, min_size, max_size, true, {}});
return static_cast<uint32_t>(tables_.size() - 1);
}
uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
uint32_t max_size, WasmInitExpr init) {
-#if DEBUG
- allocating_indirect_functions_allowed_ = false;
-#endif
tables_.push_back({type, min_size, max_size, true, std::move(init)});
return static_cast<uint32_t>(tables_.size() - 1);
}
+void WasmModuleBuilder::AddElementSegment(WasmElemSegment segment) {
+ element_segments_.push_back(std::move(segment));
+}
+
+void WasmModuleBuilder::SetIndirectFunction(
+ uint32_t table_index, uint32_t index_in_table,
+ uint32_t direct_function_index,
+ WasmElemSegment::FunctionIndexingMode indexing_mode) {
+ WasmElemSegment segment(zone_, kWasmFuncRef, table_index,
+ WasmInitExpr(static_cast<int>(index_in_table)));
+ segment.indexing_mode = indexing_mode;
+ segment.entries.emplace_back(WasmElemSegment::Entry::kRefFuncEntry,
+ direct_function_index);
+ AddElementSegment(std::move(segment));
+}
+
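A sketch of how the rewritten SetIndirectFunction might be used ({builder} again a hypothetical WasmModuleBuilder*); it now emits a one-entry active element segment instead of filling a builder-owned index array:

    // Place declared function 2 at slot 5 of table 0: the segment's offset is
    // the constant expression i32.const 5 and its single entry is a ref.func.
    builder->SetIndirectFunction(
        /*table_index=*/0, /*index_in_table=*/5, /*direct_function_index=*/2,
        WasmModuleBuilder::WasmElemSegment::kRelativeToImports);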
uint32_t WasmModuleBuilder::AddImport(base::Vector<const char> name,
FunctionSig* sig,
base::Vector<const char> module) {
@@ -454,8 +431,9 @@ void WasmModuleBuilder::SetMaxMemorySize(uint32_t value) {
void WasmModuleBuilder::SetHasSharedMemory() { has_shared_memory_ = true; }
namespace {
-void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
- ValueType type) {
+void WriteInitializerExpressionWithEnd(ZoneBuffer* buffer,
+ const WasmInitExpr& init,
+ ValueType type) {
switch (init.kind()) {
case WasmInitExpr::kI32Const:
buffer->write_u8(kExprI32Const);
@@ -534,7 +512,7 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
case WasmInitExpr::kStructNewWithRtt:
STATIC_ASSERT((kExprStructNewWithRtt >> 8) == kGCPrefix);
for (const WasmInitExpr& operand : init.operands()) {
- WriteInitializerExpression(buffer, operand, kWasmBottom);
+ WriteInitializerExpressionWithEnd(buffer, operand, kWasmBottom);
}
buffer->write_u8(kGCPrefix);
buffer->write_u8(static_cast<uint8_t>(kExprStructNewWithRtt));
@@ -543,7 +521,7 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
case WasmInitExpr::kArrayInit:
STATIC_ASSERT((kExprArrayInit >> 8) == kGCPrefix);
for (const WasmInitExpr& operand : init.operands()) {
- WriteInitializerExpression(buffer, operand, kWasmBottom);
+ WriteInitializerExpressionWithEnd(buffer, operand, kWasmBottom);
}
buffer->write_u8(kGCPrefix);
buffer->write_u8(static_cast<uint8_t>(kExprArrayInit));
@@ -559,7 +537,8 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
case WasmInitExpr::kRttSub:
case WasmInitExpr::kRttFreshSub:
// The operand to rtt.sub must be emitted first.
- WriteInitializerExpression(buffer, init.operands()[0], kWasmBottom);
+ WriteInitializerExpressionWithEnd(buffer, init.operands()[0],
+ kWasmBottom);
STATIC_ASSERT((kExprRttSub >> 8) == kGCPrefix);
STATIC_ASSERT((kExprRttFreshSub >> 8) == kGCPrefix);
buffer->write_u8(kGCPrefix);
@@ -571,6 +550,11 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
}
}
+void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
+ ValueType type) {
+ WriteInitializerExpressionWithEnd(buffer, init, type);
+ buffer->write_u8(kExprEnd);
+}
} // namespace
void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
@@ -705,7 +689,6 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
WriteValueType(buffer, global.type);
buffer->write_u8(global.mutability ? 1 : 0);
WriteInitializerExpression(buffer, global.init, global.type);
- buffer->write_u8(kExprEnd);
}
FixupSection(buffer, start);
}
@@ -744,31 +727,67 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
FixupSection(buffer, start);
}
- // == emit function table elements ===========================================
- if (indirect_functions_.size() > 0) {
+ // == emit element segments ==================================================
+ if (element_segments_.size() > 0) {
size_t start = EmitSection(kElementSectionCode, buffer);
- buffer->write_u8(1); // count of entries
- buffer->write_u8(0); // table index
- uint32_t first_element = 0;
- while (first_element < indirect_functions_.size() &&
- indirect_functions_[first_element] == kNullIndex) {
- first_element++;
- }
- uint32_t last_element =
- static_cast<uint32_t>(indirect_functions_.size() - 1);
- while (last_element >= first_element &&
- indirect_functions_[last_element] == kNullIndex) {
- last_element--;
- }
- buffer->write_u8(kExprI32Const); // offset
- buffer->write_u32v(first_element);
- buffer->write_u8(kExprEnd);
- uint32_t element_count = last_element - first_element + 1;
- buffer->write_size(element_count);
- for (uint32_t i = first_element; i <= last_element; i++) {
- buffer->write_size(indirect_functions_[i] + function_imports_.size());
+ buffer->write_size(element_segments_.size());
+ for (const WasmElemSegment& segment : element_segments_) {
+ bool is_active = segment.status == WasmElemSegment::kStatusActive;
+ // If this segment is expressible in the backwards-compatible syntax
+ // (before reftypes proposal), we should emit it in that syntax.
+ // This is the case if the segment is active and all entries are function
+      // references. Note that this is currently the only path that allows the
+      // kRelativeToDeclaredFunctions function indexing mode.
+ // TODO(manoskouk): Remove this logic once reftypes has shipped.
+ bool backwards_compatible =
+ is_active && segment.table_index == 0 &&
+ std::all_of(
+ segment.entries.begin(), segment.entries.end(), [](auto& entry) {
+ return entry.kind ==
+ WasmModuleBuilder::WasmElemSegment::Entry::kRefFuncEntry;
+ });
+ if (backwards_compatible) {
+ buffer->write_u8(0);
+ WriteInitializerExpression(buffer, segment.offset, segment.type);
+ buffer->write_size(segment.entries.size());
+ for (const WasmElemSegment::Entry entry : segment.entries) {
+ buffer->write_u32v(
+ segment.indexing_mode == WasmElemSegment::kRelativeToImports
+ ? entry.index
+ : entry.index +
+ static_cast<uint32_t>(function_imports_.size()));
+ }
+ } else {
+ DCHECK_EQ(segment.indexing_mode, WasmElemSegment::kRelativeToImports);
+ // If we pick the general syntax, we always explicitly emit the table
+ // index and the type, and use the expressions-as-elements syntax. I.e.
+ // the initial byte is one of 0x05, 0x06, and 0x07.
+ uint8_t kind_mask =
+ segment.status == WasmElemSegment::kStatusActive
+ ? 0b10
+ : segment.status == WasmElemSegment::kStatusDeclarative ? 0b11
+ : 0b01;
+ uint8_t expressions_as_elements_mask = 0b100;
+ buffer->write_u8(kind_mask | expressions_as_elements_mask);
+ if (is_active) {
+ buffer->write_u32v(segment.table_index);
+ WriteInitializerExpression(buffer, segment.offset, segment.type);
+ }
+ WriteValueType(buffer, segment.type);
+ buffer->write_size(segment.entries.size());
+ for (const WasmElemSegment::Entry entry : segment.entries) {
+ uint8_t opcode =
+ entry.kind == WasmElemSegment::Entry::kGlobalGetEntry
+ ? kExprGlobalGet
+ : entry.kind == WasmElemSegment::Entry::kRefFuncEntry
+ ? kExprRefFunc
+ : kExprRefNull;
+ buffer->write_u8(opcode);
+ buffer->write_u32v(entry.index);
+ buffer->write_u8(kExprEnd);
+ }
+ }
}
-
FixupSection(buffer, start);
}
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index db2091cdba..8eeac56afd 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -207,6 +207,7 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
WasmModuleBuilder* builder() const { return builder_; }
uint32_t func_index() { return func_index_; }
+ uint32_t sig_index() { return signature_index_; }
inline FunctionSig* signature();
private:
@@ -245,6 +246,68 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
WasmModuleBuilder(const WasmModuleBuilder&) = delete;
WasmModuleBuilder& operator=(const WasmModuleBuilder&) = delete;
+  // Static representation of a wasm element segment (table initializer). This
+  // is different from the version in wasm-module.h.
+ class WasmElemSegment {
+ public:
+ // asm.js gives function indices starting with the first non-imported
+ // function.
+ enum FunctionIndexingMode {
+ kRelativeToImports,
+ kRelativeToDeclaredFunctions
+ };
+ enum Status {
+ kStatusActive, // copied automatically during instantiation.
+ kStatusPassive, // copied explicitly after instantiation.
+ kStatusDeclarative // purely declarative and never copied.
+ };
+ struct Entry {
+ enum Kind { kGlobalGetEntry, kRefFuncEntry, kRefNullEntry } kind;
+ uint32_t index;
+ Entry(Kind kind, uint32_t index) : kind(kind), index(index) {}
+ Entry() : kind(kRefNullEntry), index(0) {}
+ };
+
+ // Construct an active segment.
+ WasmElemSegment(Zone* zone, ValueType type, uint32_t table_index,
+ WasmInitExpr offset)
+ : type(type),
+ table_index(table_index),
+ offset(std::move(offset)),
+ entries(zone),
+ status(kStatusActive) {
+ DCHECK(IsValidOffsetKind(offset.kind()));
+ }
+
+ // Construct a passive or declarative segment, which has no table
+ // index or offset.
+ WasmElemSegment(Zone* zone, ValueType type, bool declarative)
+ : type(type),
+ table_index(0),
+ entries(zone),
+ status(declarative ? kStatusDeclarative : kStatusPassive) {
+ DCHECK(IsValidOffsetKind(offset.kind()));
+ }
+
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmElemSegment);
+
+ ValueType type;
+ uint32_t table_index;
+ WasmInitExpr offset;
+ FunctionIndexingMode indexing_mode = kRelativeToImports;
+ ZoneVector<Entry> entries;
+ Status status;
+
+ private:
+ // This ensures no {WasmInitExpr} with subexpressions is used, which would
+    // cause a memory leak because those are stored in an std::vector. Such an
+    // offset would also be mistyped.
+ bool IsValidOffsetKind(WasmInitExpr::Operator kind) {
+ return kind == WasmInitExpr::kI32Const ||
+ kind == WasmInitExpr::kGlobalGet;
+ }
+ };
+
// Building methods.
uint32_t AddImport(base::Vector<const char> name, FunctionSig* sig,
base::Vector<const char> module = {});
@@ -255,16 +318,23 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
bool mutability,
base::Vector<const char> module = {});
void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
+  // Add an element segment to this {WasmModuleBuilder}. {segment}'s entries
+ // have to be initialized.
+ void AddElementSegment(WasmElemSegment segment);
+  // Helper method to create an active segment with one function. Assumes that
+  // the table at {table_index} is typed as funcref.
+ void SetIndirectFunction(uint32_t table_index, uint32_t index_in_table,
+ uint32_t direct_function_index,
+ WasmElemSegment::FunctionIndexingMode indexing_mode);
+ // Increase the starting size of the table at {table_index} by {count}. Also
+ // increases the maximum table size if needed. Returns the former starting
+ // size, or the maximum uint32_t value if the maximum table size has been
+ // exceeded.
+ uint32_t IncreaseTableMinSize(uint32_t table_index, uint32_t count);
uint32_t AddSignature(FunctionSig* sig);
uint32_t AddException(FunctionSig* type);
uint32_t AddStructType(StructType* type);
uint32_t AddArrayType(ArrayType* type);
- // In the current implementation, it's supported to have uninitialized slots
- // at the beginning and/or end of the indirect function table, as long as
- // the filled slots form a contiguous block in the middle.
- uint32_t AllocateIndirectFunctions(uint32_t count);
- void SetIndirectFunction(uint32_t indirect, uint32_t direct);
- void SetMaxTableSize(uint32_t max);
uint32_t AddTable(ValueType type, uint32_t min_size);
uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size);
uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size,
@@ -288,10 +358,17 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
Zone* zone() { return zone_; }
+ ValueType GetTableType(uint32_t index) { return tables_[index].type; }
+
+ bool IsSignature(uint32_t index) {
+ return types_[index].kind == Type::kFunctionSig;
+ }
+
FunctionSig* GetSignature(uint32_t index) {
DCHECK(types_[index].kind == Type::kFunctionSig);
return types_[index].sig;
}
+
bool IsStructType(uint32_t index) {
return types_[index].kind == Type::kStructType;
}
@@ -304,10 +381,15 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
}
ArrayType* GetArrayType(uint32_t index) { return types_[index].array_type; }
+ WasmFunctionBuilder* GetFunction(uint32_t index) { return functions_[index]; }
int NumExceptions() { return static_cast<int>(exceptions_.size()); }
int NumTypes() { return static_cast<int>(types_.size()); }
+ int NumTables() { return static_cast<int>(tables_.size()); }
+
+ int NumFunctions() { return static_cast<int>(functions_.size()); }
+
FunctionSig* GetExceptionType(int index) {
return types_[exceptions_[index]].sig;
}
@@ -380,12 +462,11 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
ZoneVector<WasmFunctionBuilder*> functions_;
ZoneVector<WasmTable> tables_;
ZoneVector<WasmDataSegment> data_segments_;
- ZoneVector<uint32_t> indirect_functions_;
+ ZoneVector<WasmElemSegment> element_segments_;
ZoneVector<WasmGlobal> globals_;
ZoneVector<int> exceptions_;
ZoneUnorderedMap<FunctionSig, uint32_t> signature_map_;
int start_function_index_;
- uint32_t max_table_size_ = 0;
uint32_t min_memory_size_;
uint32_t max_memory_size_;
bool has_max_memory_size_;
@@ -393,8 +474,6 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
#if DEBUG
// Once AddExportedImport is called, no more imports can be added.
bool adding_imports_allowed_ = true;
- // Indirect functions must be allocated before adding extra tables.
- bool allocating_indirect_functions_allowed_ = true;
#endif
};
diff --git a/deps/v8/src/wasm/wasm-module-sourcemap.cc b/deps/v8/src/wasm/wasm-module-sourcemap.cc
index 85a171e5ac..ea03dae8e2 100644
--- a/deps/v8/src/wasm/wasm-module-sourcemap.cc
+++ b/deps/v8/src/wasm/wasm-module-sourcemap.cc
@@ -6,11 +6,18 @@
#include <algorithm>
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-json.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-primitive.h"
#include "src/api/api.h"
#include "src/base/vlq-base64.h"
namespace v8 {
+
+class String;
+
namespace internal {
namespace wasm {
diff --git a/deps/v8/src/wasm/wasm-module-sourcemap.h b/deps/v8/src/wasm/wasm-module-sourcemap.h
index fd8c1117fa..38c0358f90 100644
--- a/deps/v8/src/wasm/wasm-module-sourcemap.h
+++ b/deps/v8/src/wasm/wasm-module-sourcemap.h
@@ -12,10 +12,13 @@
#include <string>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/base/macros.h"
namespace v8 {
+
+class String;
+
namespace internal {
namespace wasm {
// The class is for decoding and managing source map generated by a WebAssembly
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index a75d83df02..f7e9f2a975 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -186,7 +186,6 @@ bool WasmGlobalObject::SetFuncRef(Isolate* isolate, Handle<Object> value) {
// WasmInstanceObject
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_start, byte*, kMemoryStartOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, size_t, kMemorySizeOffset)
-PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_mask, size_t, kMemoryMaskOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, isolate_root, Address,
kIsolateRootOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, stack_limit_address, Address,
@@ -559,11 +558,26 @@ int WasmStruct::Size(const wasm::StructType* type) {
Heap::kMinObjectSizeInTaggedWords * kTaggedSize);
}
-int WasmStruct::GcSafeSize(Map map) {
- wasm::StructType* type = GcSafeType(map);
- return Size(type);
+// static
+void WasmStruct::EncodeInstanceSizeInMap(int instance_size, Map map) {
+ // WasmStructs can be bigger than the {map.instance_size_in_words} field
+ // can describe; yet we have to store the instance size somewhere on the
+ // map so that the GC can read it without relying on any other objects
+ // still being around. To solve this problem, we store the instance size
+ // in two other fields that are otherwise unused for WasmStructs.
+ STATIC_ASSERT(0xFFFF - kHeaderSize >
+ wasm::kMaxValueTypeSize * wasm::kV8MaxWasmStructFields);
+ map.SetWasmByte1(instance_size & 0xFF);
+ map.SetWasmByte2(instance_size >> 8);
+}
+
+// static
+int WasmStruct::DecodeInstanceSizeFromMap(Map map) {
+ return (map.WasmByte2() << 8) | map.WasmByte1();
}
+int WasmStruct::GcSafeSize(Map map) { return DecodeInstanceSizeFromMap(map); }
+
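A round-trip sanity sketch for the new map-based size encoding (the concrete size is invented for illustration):

    // EncodeInstanceSizeInMap(0x1234, map) stores WasmByte1 = 0x34 and
    // WasmByte2 = 0x12; DecodeInstanceSizeFromMap(map) recombines them as
    // (0x12 << 8) | 0x34 == 0x1234, which is what GcSafeSize now returns
    // without dereferencing the struct's type.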
wasm::StructType* WasmStruct::type() const { return type(map()); }
Address WasmStruct::RawFieldAddress(int raw_offset) {
@@ -614,12 +628,7 @@ wasm::ArrayType* WasmArray::GcSafeType(Map map) {
wasm::ArrayType* WasmArray::type() const { return type(map()); }
int WasmArray::SizeFor(Map map, int length) {
- int element_size = type(map)->element_type().element_size_bytes();
- return kHeaderSize + RoundUp(element_size * length, kTaggedSize);
-}
-
-int WasmArray::GcSafeSizeFor(Map map, int length) {
- int element_size = GcSafeType(map)->element_type().element_size_bytes();
+ int element_size = DecodeElementSizeFromMap(map);
return kHeaderSize + RoundUp(element_size * length, kTaggedSize);
}
@@ -635,6 +644,14 @@ Handle<Object> WasmArray::GetElement(Isolate* isolate, Handle<WasmArray> array,
return ReadValueAt(isolate, array, element_type, offset);
}
+// static
+void WasmArray::EncodeElementSizeInMap(int element_size, Map map) {
+ map.SetWasmByte1(element_size);
+}
+
+// static
+int WasmArray::DecodeElementSizeFromMap(Map map) { return map.WasmByte1(); }
+
void WasmTypeInfo::clear_foreign_address(Isolate* isolate) {
#ifdef V8_HEAP_SANDBOX
// Due to the type-specific pointer tags for external pointers, we need to
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index a6ff80f624..a52dd7fbc5 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -17,6 +17,7 @@
#include "src/objects/struct-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/utils.h"
+#include "src/wasm/code-space-access.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
@@ -1242,21 +1243,13 @@ bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
void WasmInstanceObject::SetRawMemory(byte* mem_start, size_t mem_size) {
CHECK_LE(mem_size, wasm::max_mem_bytes());
#if V8_HOST_ARCH_64_BIT
- uint64_t mem_mask64 = base::bits::RoundUpToPowerOfTwo64(mem_size) - 1;
set_memory_start(mem_start);
set_memory_size(mem_size);
- set_memory_mask(mem_mask64);
#else
// Must handle memory > 2GiB specially.
CHECK_LE(mem_size, size_t{kMaxUInt32});
- uint32_t mem_mask32 =
- (mem_size > 2 * size_t{GB})
- ? 0xFFFFFFFFu
- : base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(mem_size)) -
- 1;
set_memory_start(mem_start);
set_memory_size(mem_size);
- set_memory_mask(mem_mask32);
#endif
}
@@ -1540,7 +1533,8 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
if (sig_id >= 0) {
wasm::NativeModule* native_module =
instance->module_object().native_module();
- // TODO(wasm): Cache and reuse wrapper code.
+ // TODO(wasm): Cache and reuse wrapper code, to avoid repeated compilation
+ // and permissions switching.
const wasm::WasmFeatures enabled = native_module->enabled_features();
auto resolved = compiler::ResolveWasmImportCall(
callable, sig, instance->module(), enabled);
@@ -1553,10 +1547,11 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
if (kind == compiler::WasmImportCallKind ::kJSFunctionArityMismatch) {
expected_arity = Handle<JSFunction>::cast(callable)
->shared()
- .internal_formal_parameter_count();
+ .internal_formal_parameter_count_without_receiver();
}
wasm::WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
&env, kind, sig, false, expected_arity);
+ wasm::CodeSpaceWriteScope write_scope(native_module);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots,
@@ -2030,7 +2025,7 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
// method. This does not apply to functions exported from asm.js however.
DCHECK_EQ(is_asm_js_module, js_function->IsConstructor());
shared->set_length(arity);
- shared->set_internal_formal_parameter_count(arity);
+ shared->set_internal_formal_parameter_count(JSParameterCount(arity));
shared->set_script(instance->module_object().script());
return Handle<WasmExportedFunction>::cast(js_function);
}
@@ -2115,7 +2110,8 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
CK kind = compiler::kDefaultImportCallKind;
if (callable->IsJSFunction()) {
SharedFunctionInfo shared = Handle<JSFunction>::cast(callable)->shared();
- expected_arity = shared.internal_formal_parameter_count();
+ expected_arity =
+ shared.internal_formal_parameter_count_without_receiver();
if (expected_arity != parameter_count) {
kind = CK::kJSFunctionArityMismatch;
}
@@ -2143,7 +2139,8 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
Factory::JSFunctionBuilder{isolate, shared, context}
.set_map(function_map)
.Build();
- js_function->shared().set_internal_formal_parameter_count(parameter_count);
+ js_function->shared().set_internal_formal_parameter_count(
+ JSParameterCount(parameter_count));
return Handle<WasmJSFunction>::cast(js_function);
}
@@ -2217,10 +2214,6 @@ Handle<AsmWasmData> AsmWasmData::New(
return result;
}
-static_assert(wasm::kV8MaxWasmArrayLength <=
- (Smi::kMaxValue - WasmArray::kHeaderSize) / kDoubleSize,
- "max Wasm array size must fit into max object size");
-
namespace wasm {
bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 11d5c265ed..d34818109b 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -356,7 +356,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_ACCESSORS(managed_object_maps, FixedArray)
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
DECL_PRIMITIVE_ACCESSORS(memory_size, size_t)
- DECL_PRIMITIVE_ACCESSORS(memory_mask, size_t)
DECL_PRIMITIVE_ACCESSORS(isolate_root, Address)
DECL_PRIMITIVE_ACCESSORS(stack_limit_address, Address)
DECL_PRIMITIVE_ACCESSORS(real_stack_limit_address, Address)
@@ -397,7 +396,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
V(kMemoryStartOffset, kSystemPointerSize) \
V(kMemorySizeOffset, kSizetSize) \
- V(kMemoryMaskOffset, kSizetSize) \
V(kStackLimitAddressOffset, kSystemPointerSize) \
V(kImportedFunctionTargetsOffset, kSystemPointerSize) \
V(kIndirectFunctionTableTargetsOffset, kSystemPointerSize) \
@@ -903,6 +901,8 @@ class WasmStruct : public TorqueGeneratedWasmStruct<WasmStruct, WasmObject> {
static inline wasm::StructType* GcSafeType(Map map);
static inline int Size(const wasm::StructType* type);
static inline int GcSafeSize(Map map);
+ static inline void EncodeInstanceSizeInMap(int instance_size, Map map);
+ static inline int DecodeInstanceSizeFromMap(Map map);
// Returns the address of the field at given offset.
inline Address RawFieldAddress(int raw_offset);
@@ -939,7 +939,6 @@ class WasmArray : public TorqueGeneratedWasmArray<WasmArray, WasmObject> {
wasm::WasmValue GetElement(uint32_t index);
static inline int SizeFor(Map map, int length);
- static inline int GcSafeSizeFor(Map map, int length);
// Returns boxed value of the array's element.
static inline Handle<Object> GetElement(Isolate* isolate,
@@ -949,6 +948,17 @@ class WasmArray : public TorqueGeneratedWasmArray<WasmArray, WasmObject> {
// Returns the Address of the element at {index}.
Address ElementAddress(uint32_t index);
+ static int MaxLength(const wasm::ArrayType* type) {
+ // The total object size must fit into a Smi, for filler objects. To make
+  // the behavior of Wasm programs independent of the Smi configuration,
+ // we hard-code the smaller of the two supported ranges.
+ int element_shift = type->element_type().element_size_log2();
+ return (SmiTagging<4>::kSmiMaxValue - kHeaderSize) >> element_shift;
+ }
+
+ static inline void EncodeElementSizeInMap(int element_size, Map map);
+ static inline int DecodeElementSizeFromMap(Map map);
+
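A worked instance of the new MaxLength() bound (the element type is chosen only for illustration): with the hard-coded 31-bit Smi range, SmiTagging<4>::kSmiMaxValue is 2^30 - 1 = 1073741823, so for an i32 array (element_size_log2() == 2):

    // MaxLength is about (1073741823 - kHeaderSize) >> 2, i.e. roughly 2^28
    // elements, versus the fixed kV8MaxWasmArrayLength of 1u << 26 that this
    // patch removes from wasm-limits.h.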
DECL_PRINTER(WasmArray)
class BodyDescriptor;
diff --git a/deps/v8/src/web-snapshot/web-snapshot.cc b/deps/v8/src/web-snapshot/web-snapshot.cc
index 5e8ae15c0b..3e2aa43067 100644
--- a/deps/v8/src/web-snapshot/web-snapshot.cc
+++ b/deps/v8/src/web-snapshot/web-snapshot.cc
@@ -6,7 +6,11 @@
#include <limits>
-#include "include/v8.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-primitive.h"
+#include "include/v8-script.h"
#include "src/api/api-inl.h"
#include "src/base/platform/wrappers.h"
#include "src/handles/handles.h"
@@ -1513,15 +1517,14 @@ void WebSnapshotDeserializer::ReadValue(
case ValueType::REGEXP: {
Handle<String> pattern = ReadString(false);
Handle<String> flags_string = ReadString(false);
- bool success = false;
- JSRegExp::Flags flags =
- JSRegExp::FlagsFromString(isolate_, flags_string, &success);
- if (!success) {
+ base::Optional<JSRegExp::Flags> flags =
+ JSRegExp::FlagsFromString(isolate_, flags_string);
+ if (!flags.has_value()) {
Throw("Web snapshot: Malformed flags in regular expression");
return;
}
MaybeHandle<JSRegExp> maybe_regexp =
- JSRegExp::New(isolate_, pattern, flags);
+ JSRegExp::New(isolate_, pattern, flags.value());
if (!maybe_regexp.ToHandle(&value)) {
Throw("Web snapshot: Malformed RegExp");
return;